From 809f07c6359e76a6ce87b1c23884d33e9ec8923f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:02 -0500 Subject: [PATCH 0001/1534] New translations global.json (Romanian) --- website/src/pages/ro/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ro/global.json b/website/src/pages/ro/global.json index 250299eb3773..45919017e38f 100644 --- a/website/src/pages/ro/global.json +++ b/website/src/pages/ro/global.json @@ -1,14 +1,35 @@ { - "collapse": "Minimizează", - "expand": "Expandează", - "previous": "Anterior", - "next": "Următorul", - "editPage": "Editează pagina", - "pageSections": "Secțiuni de pagină", - "linkToThisSection": "Link către această secțiune", - "technicalLevelRequired": "Nivel tehnic necesar", - "notFoundTitle": "Hopa! Această pagină a fost pierdută în spațiu...", - "notFoundSubtitle": "Verifică dacă utilizezi adresa corectă sau explorează site-ul nostru făcând clic pe linkul de mai jos.", - "goHome": "Mergi la pagina principală", - "video": "Video" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgrafuri", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 125881f4931cc81c50b099ffbe225bd2cd704fcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:03 -0500 Subject: [PATCH 0002/1534] New translations global.json (French) --- website/src/pages/fr/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/fr/global.json b/website/src/pages/fr/global.json index 7041afc44a5a..71ccdec34af5 100644 --- a/website/src/pages/fr/global.json +++ b/website/src/pages/fr/global.json @@ -1,14 +1,35 @@ { - "collapse": "Abattement", - "expand": "Développer", - "previous": "Précédente", - "next": "Suivante", - "editPage": "Modifier une page", - "pageSections": "Sections de la page", - "linkToThisSection": "Lien vers cette section", - "technicalLevelRequired": "Niveau technique exigé", - "notFoundTitle": "Oups ! 
Cette page a été perdue au cours de l'espace...", - "notFoundSubtitle": "Vérifiez si vous utilisez la bonne adresse ou explorez notre site web par cliquant sur le lien ci-dessous.", - "goHome": "Rentrer à la page d'accueil", - "video": "La vidéo" + "navigation": { + "title": "La Navigation principale", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From aedfa4fa20113654ca1d94cb824cf5c83837a968 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:04 -0500 Subject: [PATCH 0003/1534] New translations global.json (Spanish) --- website/src/pages/es/global.json | 45 ++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/website/src/pages/es/global.json b/website/src/pages/es/global.json index 9b93320ca881..b9c8db5fa5fa 100644 --- a/website/src/pages/es/global.json +++ b/website/src/pages/es/global.json @@ -1,18 +1,35 @@ { - "collapse": "Colapsar", - "expand": "Expandir", - "previous": "Anterior", - "next": "Siguiente", - "editPage": "Editar página", - "pageSections": "Sección de la página", - "linkToThisSection": "Enlace a esta sección", - "technicalLevelRequired": "Nivel técnico requerido", - "notFoundTitle": "¡Ups! Esta página se ha perdido en el espacio...", - "notFoundSubtitle": "Verifica que estés usando la dirección correcta o visita nuestro sitio web haciendo clic en el enlace de abajo.", - "goHome": "Ir a la página principal", - "video": "Video", - "sidebar": { + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", "subgraphs": "Subgrafos", - "indexing": "Indexación" + "substreams": "Corrientes secundarias", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! 
This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" } } From 67039868ca8d75498ace6a7922ce60721d81e428 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:05 -0500 Subject: [PATCH 0004/1534] New translations global.json (Arabic) --- website/src/pages/ar/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ar/global.json b/website/src/pages/ar/global.json index 8fca1ea1ceba..b543fd624f0e 100644 --- a/website/src/pages/ar/global.json +++ b/website/src/pages/ar/global.json @@ -1,14 +1,35 @@ { - "collapse": "طي", - "expand": "توسيع", - "previous": "السابق", - "next": "التالي", - "editPage": "تعديل الصفحة", - "pageSections": "أقسام الصفحة", - "linkToThisSection": "رابط لهذا القسم", - "technicalLevelRequired": "المستوى التقني المطلوب", - "notFoundTitle": "عفوا! هذه الصفحة ضاعت في الفضاء...", - "notFoundSubtitle": "تحقق مما إذا كنت تستخدم العنوان الصحيح أو استكشف موقعنا على الويب من خلال النقر على الرابط أدناه.", - "goHome": "الذهاب للرئيسية", - "video": "Video" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "متعدد-السلاسل", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 6af75c433a79082c508d2a4f0c2e958b3de68177 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:06 -0500 Subject: [PATCH 0005/1534] New translations global.json (Czech) --- website/src/pages/cs/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/cs/global.json b/website/src/pages/cs/global.json index d8fc9fedee52..c431472eb4f5 100644 --- a/website/src/pages/cs/global.json +++ b/website/src/pages/cs/global.json @@ -1,14 +1,35 @@ { - "collapse": "Sbalit", - "expand": "Zvětšit", - "previous": "Předchozí", - "next": "Další", - "editPage": "Upravit stránku", - "pageSections": "Stránka Sekce", - "linkToThisSection": "Odkaz na tuto sekci", - "technicalLevelRequired": "Požadovaná technická úroveň", - "notFoundTitle": "Oops! 
Tato stránka se ztratila ve vesmíru...", - "notFoundSubtitle": "Zkontrolujte, zda používáte správnou adresu, nebo si prohlédněte naše webové stránky kliknutím na níže uvedený odkaz.", - "goHome": "Jdi domů", - "video": "Video" + "navigation": { + "title": "Hlavní navigace", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Podgrafy", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 60fe05f40853551d83626ce13e5d3ed1ecf7f0bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:07 -0500 Subject: [PATCH 0006/1534] New translations global.json (German) --- website/src/pages/de/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/de/global.json b/website/src/pages/de/global.json index bf08c67e0543..424bff2965bc 100644 --- a/website/src/pages/de/global.json +++ b/website/src/pages/de/global.json @@ -1,14 +1,35 @@ { - "collapse": "Collapse", - "expand": "Erweitern", - "previous": "Zurück", - "next": "Weiter", - "editPage": "Seite bearbeiten", - "pageSections": "Seitenabschnitte", - "linkToThisSection": "Link zu diesem Abschnitt", - "technicalLevelRequired": "Erforderliches technisches Niveau", - "notFoundTitle": "Ups! Diese Seite ging im Weltraum verloren...", - "notFoundSubtitle": "Überprüfen Sie, ob Sie die richtige Adresse verwenden, oder besuchen Sie unsere Website, indem Sie auf den unten stehenden Link klicken.", - "goHome": "Zurück zur Startseite", - "video": "Video" + "navigation": { + "title": "Hauptmenü", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Ressourcen", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! 
This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 462d8292ea7afab4f76737dfa4038103d86910d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:08 -0500 Subject: [PATCH 0007/1534] New translations global.json (Italian) --- website/src/pages/it/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/it/global.json b/website/src/pages/it/global.json index 07831b3b64a8..f0bd80d9715b 100644 --- a/website/src/pages/it/global.json +++ b/website/src/pages/it/global.json @@ -1,14 +1,35 @@ { - "collapse": "Riduci", - "expand": "Espandi", - "previous": "Precedente", - "next": "Successivo", - "editPage": "Modifica pagina", - "pageSections": "Sezioni della pagina", - "linkToThisSection": "Collegamento a questa sezione", - "technicalLevelRequired": "Livello tecnico richiesto", - "notFoundTitle": "Oops! Questa pagina è andata persa nello spazio...", - "notFoundSubtitle": "Verifica se stai utilizzando l'indirizzo corretto o esplora il nostro sito web cliccando sul link qui sotto.", - "goHome": "Torna alla pagina iniziale", - "video": "Video" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! 
This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From d58ea588dae97ac9d6b8d9a9310a5ad582748d8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:09 -0500 Subject: [PATCH 0008/1534] New translations global.json (Japanese) --- website/src/pages/ja/global.json | 45 ++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ja/global.json b/website/src/pages/ja/global.json index e339b09ca425..6326992e205b 100644 --- a/website/src/pages/ja/global.json +++ b/website/src/pages/ja/global.json @@ -1,18 +1,35 @@ { - "collapse": "崩壊", - "expand": "拡大", - "previous": "前", - "next": "次", - "editPage": "ページを編集", - "pageSections": "ページ セクション", - "linkToThisSection": "このセクションへのリンク", - "technicalLevelRequired": "必要な技術レベル", - "notFoundTitle": "おっとっと!このページはスペースで失われました...", - "notFoundSubtitle": "以下のリンクをクリックして、正しいアドレスを使用しているかどうかを確認し、また、ウェブサイトをご覧ください。", - "goHome": "家に帰れ", - "video": "ビデオ", - "sidebar": { + "navigation": { + "title": "メインナビゲーション", + "show": "Show navigation", + "hide": "Hide navigation", "subgraphs": "サブグラフ", - "indexing": "インデキシング" + "substreams": "サブストリーム", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" } } From 5f87074a25eb32cbfd73c9ad3d1f734b10df4e32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:10 -0500 Subject: [PATCH 0009/1534] New translations global.json (Korean) --- website/src/pages/ko/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ko/global.json b/website/src/pages/ko/global.json index 6a3eb234bfce..f0bd80d9715b 100644 --- a/website/src/pages/ko/global.json +++ b/website/src/pages/ko/global.json @@ -1,14 +1,35 @@ { - "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", - "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! 
This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", - "video": "Video" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From fc66b53a95c08201327773698fe7d9c69eb6c7b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:11 -0500 Subject: [PATCH 0010/1534] New translations global.json (Dutch) --- website/src/pages/nl/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/nl/global.json b/website/src/pages/nl/global.json index 199a069285a4..c90c8a637061 100644 --- a/website/src/pages/nl/global.json +++ b/website/src/pages/nl/global.json @@ -1,14 +1,35 @@ { - "collapse": "Inklappen", - "expand": "Uitvouwen", - "previous": "Vorige", - "next": "Volgende", - "editPage": "Pagina bewerken", - "pageSections": "Pagina Selecties", - "linkToThisSection": "Link naar deze selectie", - "technicalLevelRequired": "Technisch Niveau Vereist", - "notFoundTitle": "Oeps! Deze pagina is in de ruimte verloren gegaan...", - "notFoundSubtitle": "Controleer of je het juiste adres gebruikt of navigeer onze website door op de link hieronder te clicken.", - "goHome": "Naar De Home Pagina Gaan", - "video": "Video" + "navigation": { + "title": "Startpagina", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! 
This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From b05391db909a78d8bdcdd9f4e9f3ad82e7256508 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:12 -0500 Subject: [PATCH 0011/1534] New translations global.json (Polish) --- website/src/pages/pl/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/pl/global.json b/website/src/pages/pl/global.json index 3868b46a2392..9b22568b5199 100644 --- a/website/src/pages/pl/global.json +++ b/website/src/pages/pl/global.json @@ -1,14 +1,35 @@ { - "collapse": "Zwiń", - "expand": "Rozwiń", - "previous": "Poprzedni", - "next": "Następny", - "editPage": "Edytuj stronę", - "pageSections": "Sekcje strony", - "linkToThisSection": "Link do tej sekcji", - "technicalLevelRequired": "Wymagana wiedza techniczna", - "notFoundTitle": "Ups! Ta strona mogła zagubić się w kosmosie...", - "notFoundSubtitle": "Sprawdź, czy został wpisany poprawny adres strony lub odwiedź naszą stronę, używając linku poniżej.", - "goHome": "Wróć do strony glównej", - "video": "Wideo" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgrafy", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 18d9cd6a767272111b14d4fd32f40a5efdf214e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:13 -0500 Subject: [PATCH 0012/1534] New translations global.json (Portuguese) --- website/src/pages/pt/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/pt/global.json b/website/src/pages/pt/global.json index 725262569646..dfa39b21d79b 100644 --- a/website/src/pages/pt/global.json +++ b/website/src/pages/pt/global.json @@ -1,14 +1,35 @@ { - "collapse": "Esconder", - "expand": "Expandir", - "previous": "Anterior", - "next": "Próximo", - "editPage": "Editar página", - "pageSections": "Seções da Página", - "linkToThisSection": "Link para esta seção", - "technicalLevelRequired": "Nível Técnico Exigido", - "notFoundTitle": "Ops! 
Esta página foi pro espaço...", - "notFoundSubtitle": "Confira se o endereço está certo, ou clique o atalho abaixo para explorar o nosso sítio.", - "goHome": "Página Inicial", - "video": "Vídeo" + "navigation": { + "title": "Navegação principal", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Recursos", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 40bbbb45bed0e6c49494d3d3eca8c082e844e155 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:14 -0500 Subject: [PATCH 0013/1534] New translations global.json (Russian) --- website/src/pages/ru/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ru/global.json b/website/src/pages/ru/global.json index 3f928c7a9108..0b02b6ff1575 100644 --- a/website/src/pages/ru/global.json +++ b/website/src/pages/ru/global.json @@ -1,14 +1,35 @@ { - "collapse": "Свернуть", - "expand": "Развернуть", - "previous": "Предыдущий", - "next": "Следующий", - "editPage": "Редактировать страницу", - "pageSections": "Разделы страницы", - "linkToThisSection": "Ссылка на этот раздел", - "technicalLevelRequired": "Требуемый технический уровень", - "notFoundTitle": "Упс! Эту страницу потеряли в космосе...", - "notFoundSubtitle": "Проверьте верно ли указан адрес или перейдите на наш сайт по ссылке ниже.", - "goHome": "На главную страницу", - "video": "Видео" + "navigation": { + "title": "Главное меню", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Субграфы", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! 
This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 5e460fd4c09f50439d258226e6d5fa76874ba9e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:15 -0500 Subject: [PATCH 0014/1534] New translations global.json (Swedish) --- website/src/pages/sv/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/sv/global.json b/website/src/pages/sv/global.json index 8a35bb090097..3793fbf29d78 100644 --- a/website/src/pages/sv/global.json +++ b/website/src/pages/sv/global.json @@ -1,14 +1,35 @@ { - "collapse": "Kollaps", - "expand": "Expandera", - "previous": "Tidigare", - "next": "Nästa", - "editPage": "Redigera sida", - "pageSections": "Sektioner på sidan", - "linkToThisSection": "Länk till detta avsnitt", - "technicalLevelRequired": "Teknisk nivå krävs", - "notFoundTitle": "Hoppsan! Den här sidan försvann i rymden...", - "notFoundSubtitle": "Kontrollera om du använder rätt adress eller utforska vår webbplats genom att klicka på länken nedan.", - "goHome": "Gå hem", - "video": "Video" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgrafer", + "substreams": "Underströmmar", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 04e9753b8c0c779a92eab4159bba08147fcb83ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:17 -0500 Subject: [PATCH 0015/1534] New translations global.json (Turkish) --- website/src/pages/tr/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/tr/global.json b/website/src/pages/tr/global.json index 4754a8d03971..60b4d779ddda 100644 --- a/website/src/pages/tr/global.json +++ b/website/src/pages/tr/global.json @@ -1,14 +1,35 @@ { - "collapse": "Çöküş", - "expand": "Genişletme", - "previous": "Önceki", - "next": "Sonraki", - "editPage": "Sayfayı Düzenle", - "pageSections": "Sayfa Bölümleri", - "linkToThisSection": "Bu bölüme bağlantı", - "technicalLevelRequired": "Gerekli Teknik Seviye", - "notFoundTitle": "Hata! 
Bu sayfa kayboldu gitti...", - "notFoundSubtitle": "Doğru adresi kullanıp kullanmadığınızı kontrol edin veya aşağıdaki bağlantıya tıklayarak web sitemize göz atın.", - "goHome": "Anasayfaya Git", - "video": "Video" + "navigation": { + "title": "Ana navigasyon", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraph'ler", + "substreams": "Substream'ler", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 1592cb3093dc1e340be6570508685d67dcf5cb80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:18 -0500 Subject: [PATCH 0016/1534] New translations global.json (Ukrainian) --- website/src/pages/uk/global.json | 45 ++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/website/src/pages/uk/global.json b/website/src/pages/uk/global.json index ad9b76d67c02..6959b66fd4a7 100644 --- a/website/src/pages/uk/global.json +++ b/website/src/pages/uk/global.json @@ -1,18 +1,35 @@ { - "collapse": "Колапс", - "expand": "Розгорнути", - "previous": "Попередній", - "next": "Далі", - "editPage": "Редагування сторінки", - "pageSections": "Розділи сторінки", - "linkToThisSection": "Посилання на даний розділ", - "technicalLevelRequired": "Необхідний технічний рівень", - "notFoundTitle": "Упс! Ця сторінка загубилася в космосі...", - "notFoundSubtitle": "Перевірте, чи використовуєте ви правильну URL-адресу, або відвідайте наш вебсайт, натиснувши на посилання нижче.", - "goHome": "На головну", - "video": "Відео", - "sidebar": { + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", "subgraphs": "Підграфи", - "indexing": "Індексація" + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! 
This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" } } From 33a5d47d9bf338480cb3da2cd4a3cb1990b289d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:18 -0500 Subject: [PATCH 0017/1534] New translations global.json (Chinese Simplified) --- website/src/pages/zh/global.json | 45 ++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/website/src/pages/zh/global.json b/website/src/pages/zh/global.json index f5e381703c1f..63c04e346008 100644 --- a/website/src/pages/zh/global.json +++ b/website/src/pages/zh/global.json @@ -1,18 +1,35 @@ { - "collapse": "收起", - "expand": "打开", - "previous": "上页", - "next": "下页", - "editPage": "编辑", - "pageSections": "页面分节", - "linkToThisSection": "链到本节", - "technicalLevelRequired": "技术要求", - "notFoundTitle": "哦! 这个页面丢失了...", - "notFoundSubtitle": "检查您是否使用正确地址或通过单击以下链接浏览网站。", - "goHome": "主页", - "video": "视频", - "sidebar": { + "navigation": { + "title": "主导航", + "show": "Show navigation", + "hide": "Hide navigation", "subgraphs": "子图", - "indexing": "索引" + "substreams": "子流", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" } } From 2beeab6b28b5bbf313b5982311c1027f24c3b2c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:20 -0500 Subject: [PATCH 0018/1534] New translations global.json (Urdu (Pakistan)) --- website/src/pages/ur/global.json | 47 ++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ur/global.json b/website/src/pages/ur/global.json index 35de1f67cfaa..0f8266151ab8 100644 --- a/website/src/pages/ur/global.json +++ b/website/src/pages/ur/global.json @@ -1,18 +1,35 @@ { - "collapse": "گر جانا", - "expand": "پھیلنا", - "previous": "پچھلا", - "next": "اگلا", - "editPage": "صفحہ میں ترمیم کریں", - "pageSections": "صفحہ کے حصے", - "linkToThisSection": "اس حصے سے لنک کریں", - "technicalLevelRequired": "تکنیکی سطح کی ضرورت ہے", - "notFoundTitle": "افوہ! 
یہ صفحہ خلا میں کھو گیا تھا...", - "notFoundSubtitle": "چیک کریں کہ آیا آپ صحیح پتہ استعمال کر رہے ہیں یا نیچے دیے گئے لنک پر کلک کر کے ہماری ویب سائٹ کو دریافت کریں.", - "goHome": "گھر جاو", - "video": "ویڈیو", - "sidebar": { - "subgraphs": "سبگراف", - "indexing": "انڈیکسنگ" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "سب گراف", + "substreams": "سب سٹریمز", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" } } From 86b0d8024713b433394bc06f4586de91d245c01e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:20 -0500 Subject: [PATCH 0019/1534] New translations global.json (Vietnamese) --- website/src/pages/vi/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/vi/global.json b/website/src/pages/vi/global.json index 85a32302db5f..f0bd80d9715b 100644 --- a/website/src/pages/vi/global.json +++ b/website/src/pages/vi/global.json @@ -1,14 +1,35 @@ { - "collapse": "Thu gọn", - "expand": "Mở rộng", - "previous": "Trước", - "next": "Tiếp", - "editPage": "Chỉnh sửa trang", - "pageSections": "Các Mục trang", - "linkToThisSection": "Liên kết đến mục này", - "technicalLevelRequired": "Yêu cầu Trình độ Kỹ thuật", - "notFoundTitle": "Ối! Trang này đã bị lạc mất trong không gian...", - "notFoundSubtitle": "Kiểm tra xem bạn có đang sử dụng đúng địa chỉ hay không hoặc khám phá trang web của chúng tôi bằng cách nhấp vào liên kết bên dưới.", - "goHome": "Về Trang chủ", - "video": "Video" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "Subgraphs", + "substreams": "Substreams", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! 
This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 094ad15a7e19fee4b2231c9609e016b796e01066 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:21 -0500 Subject: [PATCH 0020/1534] New translations global.json (Marathi) --- website/src/pages/mr/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/mr/global.json b/website/src/pages/mr/global.json index 67a6e36d9852..b57692ddb6cf 100644 --- a/website/src/pages/mr/global.json +++ b/website/src/pages/mr/global.json @@ -1,14 +1,35 @@ { - "collapse": "संकुचित करा", - "expand": "विस्तृत करा", - "previous": "मागील", - "next": "पुढे", - "editPage": "पृष्ठ संपादित करा", - "pageSections": "पृष्ठ विभाग", - "linkToThisSection": "या विभागाचा दुवा", - "technicalLevelRequired": "तांत्रिक स्तर आवश्यक", - "notFoundTitle": "अरेरे! हे पान जागेत हरवले होते...", - "notFoundSubtitle": "तुम्ही योग्य पत्ता वापरत आहात का ते तपासा किंवा खालील लिंकवर क्लिक करून आमची वेबसाइट एक्सप्लोर करा.", - "goHome": "घरी जा", - "video": "व्हिडिओ" + "navigation": { + "title": "Main navigation", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "सबग्राफ", + "substreams": "उपप्रवाह", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 87935ba09fecd2f26377e637710c02b4fec70cee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:22 -0500 Subject: [PATCH 0021/1534] New translations global.json (Hindi) --- website/src/pages/hi/global.json | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/website/src/pages/hi/global.json b/website/src/pages/hi/global.json index ef894ae452d6..5b5292d8b096 100644 --- a/website/src/pages/hi/global.json +++ b/website/src/pages/hi/global.json @@ -1,14 +1,35 @@ { - "collapse": "गिरावट", - "expand": "बढ़ाना", - "previous": "पहले का", - "next": "अगला", - "editPage": "संपादित पेज", - "pageSections": "पृष्ठ अनुभाग", - "linkToThisSection": "इस अनुभाग से लिंक करें", - "technicalLevelRequired": "तकनीकी स्तर की आवश्यकता है", - "notFoundTitle": "उफ़! 
यह पृष्ठ अंतरिक्ष में खो गया था...", - "notFoundSubtitle": "जांचें कि क्या आप सही पते का उपयोग कर रहे हैं या नीचे दिए गए लिंक पर क्लिक करके हमारी वेबसाइट देखें।", - "goHome": "घर जाओ", - "video": "वीडियो" + "navigation": { + "title": "मुख्य नेविगेशन", + "show": "Show navigation", + "hide": "Hide navigation", + "subgraphs": "सबग्राफ", + "substreams": "सबस्ट्रीम", + "sps": "Substreams-Powered Subgraphs", + "indexing": "Indexing", + "resources": "Resources", + "archived": "Archived" + }, + "page": { + "lastUpdated": "Last updated", + "readingTime": { + "title": "Reading time", + "minutes": "minutes" + }, + "previous": "Previous page", + "next": "Next page", + "edit": "Edit on GitHub", + "onThisPage": "On this page", + "tableOfContents": "Table of contents", + "linkToThisSection": "Link to this section" + }, + "content": { + "note": "Note", + "video": "Video" + }, + "notFound": { + "title": "Oops! This page was lost in space...", + "subtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", + "back": "Go Home" + } } From 73643b930317ad6053cff57a004e2da20f9be7df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:24 -0500 Subject: [PATCH 0022/1534] New translations index.json (Romanian) --- website/src/pages/ro/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/ro/index.json b/website/src/pages/ro/index.json index 63c3c940c6fb..c4927f816558 100644 --- a/website/src/pages/ro/index.json +++ b/website/src/pages/ro/index.json @@ -1,67 +1,99 @@ { - "title": "Începe", - "intro": "Află mai multe despre The Graph, un protocol descentralizat pentru indexarea și interogarea datelor din blockchains.", - "shortcuts": { - "aboutTheGraph": { - "title": "Despre The Graph", - "description": "Învață mai multe despre The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgrafuri", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Quick Start", - "description": "Alătură-te și implică-te cu The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "FAQs al Developerilor", - "description": "Frequently asked questions" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Interoghează dintr-o Aplicație", - "description": "Învață sa interoghezi dintr-o aplicație" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Creează un Subgraf", - "description": "Folosește Studio pentru a crea subgrafuri" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and 
streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Roluri de rețea", - "description": "Află mai multe despre rolurile din rețeaua The Graph.", - "roles": { - "developer": { - "title": "Developer", - "description": "Creați un subgraf sau utilizați subgrafuri existente într-o aplicație descentralizată" - }, - "indexer": { - "title": "Indexer", - "description": "Operează un nod pentru a indexa date și a servi interogări" - }, - "curator": { - "title": "Curator", - "description": "Organizați datele prin semnalizarea subgrafurilor" - }, - "delegator": { - "title": "Delegator", - "description": "Asigurați securitatea rețelei prin delegarea GRT către indexeri" - } + "supportedNetworks": { + "title": "Rețele suportate", + "description": { + "base": "The Graph supports {0}. To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Află mai multe", - "products": { - "title": "Produse", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Creează, administrează și publică subgrafuri și chei de API" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Explorează subgrafurile și interacționează cu protocolul" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Rețele suportate", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 4cf928a7072ad9d345618d56a296ec23d64791fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:25 -0500 Subject: [PATCH 0023/1534] New translations index.json (French) --- website/src/pages/fr/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/fr/index.json b/website/src/pages/fr/index.json index dd7f5b249c72..48c1a676da74 100644 --- a/website/src/pages/fr/index.json +++ b/website/src/pages/fr/index.json @@ -1,67 +1,99 @@ { - "title": "Commencer", - "intro": "Découvrez The Graph, un protocole décentralisé d'indexation et d'interrogation des données provenant des blockchains.", - "shortcuts": { - "aboutTheGraph": { - "title": "À propos de The Graph", - "description": "En savoir plus sur The Graph" + "title": "Accueil", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Construisez votre premier subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraphs", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Démarrage rapide", - "description": "Lancez-vous et commencez avec The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Questions fréquentes des développeurs", - "description": "Questions fréquemment posées" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Requête depuis une application", - "description": "Apprenez à exécuter vos requêtes à partir d'une application" + "graphNode": { + "title": "Nœud de The Graph", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Créer un subgraph", - "description": "Utiliser le « Studio » pour créer des subgraphs" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Les divers rôles du réseau", - "description": "Découvrez les divers rôles du réseau The Graph.", - "roles": { - "developer": { - "title": "Développeur", - "description": "Créer un subgraph ou utiliser des subgraphs existants dans une application décentralisée" - }, - "indexer": { - "title": "Indexeur", - "description": "Utiliser un nœud pour indexer les données et répondre aux requêtes" - }, - "curator": { - "title": "Curateur", - "description": "Organiser les données en signalant les subgraphs" - }, - "delegator": { - "title": "Délégateur", - "description": "Sécuriser le réseau en déléguant des GRT aux indexeurs" - } + "supportedNetworks": { + "title": "Réseaux pris en charge", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Lire la suite", - "products": { - "title": "Produits", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Créer, gérer, déployer des subgraphs et des clés API" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Explorer les subgraphs et interagir avec le protocole" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Bonnes pratiques d'interrogation", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Réseaux pris en charge", - "description": "The Graph prend en charge les réseaux suivants.", - "footer": "Pour plus de détails, consultez la page {0}." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "Qu'est-ce que la délégation ?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From b37dfaed52f8caba404ef9412350b4a19b6bbc9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:26 -0500 Subject: [PATCH 0024/1534] New translations index.json (Spanish) --- website/src/pages/es/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/es/index.json b/website/src/pages/es/index.json index 0fd4bc82e691..c980229ff3d5 100644 --- a/website/src/pages/es/index.json +++ b/website/src/pages/es/index.json @@ -1,67 +1,99 @@ { - "title": "Comenzar", - "intro": "Aprende más sobre The Graph, un protocolo descentralizado para indexar y consultar datos de las blockchains.", - "shortcuts": { - "aboutTheGraph": { - "title": "Acerca de The Graph", - "description": "Aprende más sobre The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgrafos", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Comienzo Rapido", - "description": "Entra y empieza con The Graph" + "substreams": { + "title": "Corrientes secundarias", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Preguntas Frecuentes de los Desarrolladores", - "description": "Preguntas frecuentes" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Consulta desde una Aplicación", - "description": "Aprender a consultar desde una aplicación" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Crear un Subgrafo", - "description": "Utiliza Studio para crear subgrafos" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Funciones de la Red", - "description": "Conoce las funciones de la red The Graph.", - "roles": { - "developer": { - "title": "Developer (desarrollador)", - "description": "Crear un subgrafo o utilizar subgrafos existentes en una dapp" - }, - "indexer": { - "title": "Indexador", - "description": "Opera un nodo para indexar los datos y proveer consultas" - }, - "curator": { - "title": "Curador", - "description": "Organiza los datos mediante la señalización de subgrafos" - }, - "delegator": { - "title": "Delegador", - "description": "Proteja la red delegando GRT a los indexadores" - } + "supportedNetworks": { + "title": "Redes Admitidas", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Leer más", - "products": { - "title": "Productos", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Crear, gestionar y publicar subgrafos y claves API" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Explora los distintos subgrafos e interactua con el protocolo" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Mejores Prácticas para Consultas", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Redes Admitidas", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+    }
+  },
+  "time": {
+    "reading": "Reading time",
+    "duration": "Duration",
+    "minutes": "min"
+  }
 }

From 90db5c920903192653a0325c926b5a9c2bf80ed4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:43:27 -0500
Subject: [PATCH 0025/1534] New translations index.json (Arabic)

---
 website/src/pages/ar/index.json | 140 ++++++++++++++++++++------------
 1 file changed, 86 insertions(+), 54 deletions(-)

diff --git a/website/src/pages/ar/index.json b/website/src/pages/ar/index.json
index ef9526840c44..0f2dfc58967a 100644
--- a/website/src/pages/ar/index.json
+++ b/website/src/pages/ar/index.json
@@ -1,67 +1,99 @@
 {
-  "title": "البدء",
-  "intro": "تعرف على The Graph ، وهو بروتوكول لامركزي لفهرسة البيانات والاستعلام عنها من ال Blockchains.",
-  "shortcuts": {
-    "aboutTheGraph": {
-      "title": "حول The Graph",
-      "description": "تعرف أكثر حول The Graph"
+  "title": "Home",
+  "hero": {
+    "title": "The Graph Docs",
+    "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.",
+    "cta1": "How The Graph works",
+    "cta2": "Build your first subgraph"
+  },
+  "products": {
+    "title": "The Graph’s Products",
+    "description": "Choose a solution that fits your needs—interact with blockchain data your way.",
+    "subgraphs": {
+      "title": "Subgraphs",
+      "description": "Extract, process, and query blockchain data with open APIs.",
+      "cta": "Develop a subgraph"
     },
-    "quickStart": {
-      "title": "بداية سريعة",
-      "description": "انضم إلينا وابدأ مع The Graph"
+    "substreams": {
+      "title": "Substreams",
+      "description": "Fetch and consume blockchain data with parallel execution.",
+      "cta": "Develop with Substreams"
    },
-    "developerFaqs": {
-      "title": "الأسئلة الشائعة للمطورين",
-      "description": "الأسئلة الشائعة"
+    "sps": {
+      "title": "Substreams-Powered Subgraphs",
+      "description": "Boost your subgraph’s efficiency and scalability by using Substreams.",
+      "cta": "Set up a Substreams-powered subgraph"
     },
-    "queryFromAnApplication": {
-      "title": "الاستعلام من التطبيق",
-      "description": "تعلم كيفية الاستعلام من التطبيق"
+    "graphNode": {
+      "title": "Graph Node",
+      "description": "Index blockchain data and serve it via GraphQL queries.",
+      "cta": "Set up a local Graph Node"
     },
-    "createASubgraph": {
-      "title": "إنشاء الـ Subgraph",
-      "description": "استخدم Studio لإنشاء subgraphs"
+    "firehose": {
+      "title": "Firehose",
+      "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.",
+      "cta": "Get started with Firehose"
     }
   },
-  "networkRoles": {
-    "title": "قواعد الشبكة",
-    "description": "تعرف على قوانين شبكة The Graph.",
-    "roles": {
-      "developer": {
-        "title": "المطور",
-        "description": "قم بإنشاء Subgraph أو استخدم ال Subgraphs الموجودة في ال dapp"
-      },
-      "indexer": {
-        "title": "فهرسة (indexing)",
-        "description": "تشغيل عقدة node وذلك لفهرسة البيانات وتقديم الاستعلامات"
-      },
-      "curator": {
-        "title": "(التنسيق) curating",
-        "description": "تنظيم البيانات بواسطة الإشارة إلى subgraphs"
-      },
-      "delegator": {
-        "title": "تفويض",
-        "description": "تأمين الشبكة عن طريق تفويض GRT للمفهرسين"
-      }
+  "supportedNetworks": {
+    "title": "الشبكات المدعومة",
+    "description": {
+      "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "إقرأ المزيد", - "products": { - "title": "المنتجات", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "قم بإنشاء وإدارة ونشر ال Subgraphs و API keys" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "استكشف ال Subgraphsوتفاعل مع البروتوكول" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "أفضل الممارسات للاستعلام", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "الشبكات المدعومة", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From a3d00a34297bd605067c7d7daf021c780613bc5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:28 -0500 Subject: [PATCH 0026/1534] New translations index.json (Czech) --- website/src/pages/cs/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/cs/index.json b/website/src/pages/cs/index.json index 4dd97a91d425..2cea19c4ff1a 100644 --- a/website/src/pages/cs/index.json +++ b/website/src/pages/cs/index.json @@ -1,67 +1,99 @@ { - "title": "Začněte", - "intro": "Seznamte se s Grafu, decentralizovaným protokolem pro indexování a dotazování dat z blockchainů.", - "shortcuts": { - "aboutTheGraph": { - "title": "O grafu", - "description": "Další informace o Grafu" + "title": "Domov", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Podgrafy", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Rychlé spuštění", - "description": "Přejděte ke Grafu a začněte s ním" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "FAQs vývojářů", - "description": "Často kladené otázky" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Dotaz z aplikace", - "description": "Naučte se zadávat dotazy z aplikace" + "graphNode": { + "title": "Uzel Graf", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Vytvoření podgrafu", - "description": "Vytváření podgrafů pomocí Studio" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Síťové role", - "description": "Zjistěte více o rolích v síti Graf.", - "roles": { - "developer": { - "title": "Vývojář", - "description": "Vytvoření podgrafu nebo použití existujících podgrafů v dapp" - }, - "indexer": { - "title": "Indexer", - "description": "Provozování uzlu pro indexování dat a obsluhu dotazů" - }, - "curator": { - "title": "Kurátor", - "description": "Organizace dat pomocí signalizace na podgraf" - }, - "delegator": { - "title": "Delegát", - "description": "Zabezpečení sítě delegováním GRT na indexátory" - } + "supportedNetworks": { + "title": "Podporované sítě", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Přečtěte si více", - "products": { - "title": "Produkty", - "products": { - "subgraphStudio": { - "title": "Podgraf Studio", - "description": "Vytváření, správa a publikování podgrafů a klíčů API" - }, - "graphExplorer": { - "title": "Průzkumník grafů", - "description": "Prozkoumání podgrafů a interakce s protokolem" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Osvědčené postupy dotazování", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Podporované sítě", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From d9d9c84efb07870e2ce1b4fcae1cf5fcf811595e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:29 -0500 Subject: [PATCH 0027/1534] New translations index.json (German) --- website/src/pages/de/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/de/index.json b/website/src/pages/de/index.json index a3d38b854804..fd28f4bd87af 100644 --- a/website/src/pages/de/index.json +++ b/website/src/pages/de/index.json @@ -1,67 +1,99 @@ { - "title": "Los geht’s", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", - "shortcuts": { - "aboutTheGraph": { - "title": "About The Graph", - "description": "Learn more about The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Erstellen Sie Ihren ersten Subgraphen" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraphs", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Schnellstart", - "description": "Jump in and start with The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Developer FAQs", - "description": "Frequently asked questions" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Query from an Application", - "description": "Learn to query from an application" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Network Roles", - "description": "Learn about The Graph’s network roles.", - "roles": { - "developer": { - "title": "Entwickler", - "description": "Create a subgraph or use existing subgraphs in a dapp" - }, - "indexer": { - "title": "Indexierer", - "description": "Operate a node to index data and serve queries" - }, - "curator": { - "title": "Kurator", - "description": "Organize data by signaling on subgraphs" - }, - "delegator": { - "title": "Delegierter", - "description": "Secure the network by delegating GRT to Indexers" - } + "supportedNetworks": { + "title": "Supported Networks", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Read more", - "products": { - "title": "Produkte", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Create, manage and publish subgraphs and API keys" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Explore subgraphs and interact with the protocol" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph unterstützt folgende Netzwerke.", - "footer": "Weitere Einzelheiten finden Sie auf der Seite {0}." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "Was ist Delegieren?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 8b1129136890bae8f388ac42493bd0f4302b4449 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:30 -0500 Subject: [PATCH 0028/1534] New translations index.json (Italian) --- website/src/pages/it/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/it/index.json b/website/src/pages/it/index.json index 10b8c6a7f7c2..787097b1fbc4 100644 --- a/website/src/pages/it/index.json +++ b/website/src/pages/it/index.json @@ -1,67 +1,99 @@ { - "title": "Iniziare", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", - "shortcuts": { - "aboutTheGraph": { - "title": "Informazioni su The Graph", - "description": "Learn more about The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraphs", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Quick Start", - "description": "Jump in and start with The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Developer FAQs", - "description": "Frequently asked questions" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Query from an Application", - "description": "Learn to query from an application" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Network Roles", - "description": "Learn about The Graph’s network roles.", - "roles": { - "developer": { - "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" - }, - "indexer": { - "title": "Indexer", - "description": "Operate a node to index data and serve queries" - }, - "curator": { - "title": "Curator", - "description": "Organize data by signaling on subgraphs" - }, - "delegator": { - "title": "Delegator", - "description": "Secure the network by delegating GRT to Indexers" - } + "supportedNetworks": { + "title": "Supported Networks", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Leggi di più", - "products": { - "title": "Products", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Create, manage and publish subgraphs and API keys" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Explore subgraphs and interact with the protocol" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From fe3f4f1d71af7171dedc110aab93745bc43e9a5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:31 -0500 Subject: [PATCH 0029/1534] New translations index.json (Japanese) --- website/src/pages/ja/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/ja/index.json b/website/src/pages/ja/index.json index 7e8c957541eb..121f56e91166 100644 --- a/website/src/pages/ja/index.json +++ b/website/src/pages/ja/index.json @@ -1,67 +1,99 @@ { - "title": "始めましょう", - "intro": "ブロックチェーンのデータにインデックスを付けたり、クエリを実行するための分散型プロトコル「The Graph」についてご紹介します。", - "shortcuts": { - "aboutTheGraph": { - "title": "The Graphについて", - "description": "The Graphについて学ぶ" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "サブグラフ", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "クイックスタート", - "description": "まずは「The Graph」から始める" + "substreams": { + "title": "サブストリーム", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "開発者 FAQ", - "description": "よくある質問と回答" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "アプリケーションからのクエリ", - "description": "アプリケーションからのクエリ方法を学ぶ" + "graphNode": { + "title": "グラフノード", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "サブグラフの作成", - "description": "スタジオを使ってサブグラフを作成" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "ネットワークの役割", - "description": "The Graphのネットワークルールを学ぶ", - "roles": { - "developer": { - "title": "ディベロッパー", - "description": "サブグラフの作成及び既存のサブグラフを利用したdappの作成" - }, - "indexer": { - "title": "インデクサー", - "description": "ノードを稼働してデータインデックスを作成し、クエリサービスを提供する" - }, - "curator": { - "title": "学芸員", - "description": "サブグラフのシグナリングによるデータの整理" - }, - "delegator": { - "title": "委任者", - "description": "保有GRTをインデクサーに委任することでネットワークの安全性を確保" - } + "supportedNetworks": { + "title": "サポートされているネットワーク", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "続きを読む", - "products": { - "title": "プロダクト", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "サブグラフとAPIキーの作成、管理、公開" - }, - "graphExplorer": { - "title": "グラフエクスプローラ", - "description": "サブグラフの探索とプロトコルとの対話" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "クエリのベストプラクティス", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "サポートされているネットワーク", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From a1855ef1dc959fdd589104f9b20acfb69b56c253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:32 -0500 Subject: [PATCH 0030/1534] New translations index.json (Korean) --- website/src/pages/ko/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/ko/index.json b/website/src/pages/ko/index.json index 7b5de0b796fd..787097b1fbc4 100644 --- a/website/src/pages/ko/index.json +++ b/website/src/pages/ko/index.json @@ -1,67 +1,99 @@ { - "title": "Get Started", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", - "shortcuts": { - "aboutTheGraph": { - "title": "About The Graph", - "description": "Learn more about The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraphs", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Quick Start", - "description": "Jump in and start with The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Developer FAQs", - "description": "Frequently asked questions" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Query from an Application", - "description": "Learn to query from an application" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Network Roles", - "description": "Learn about The Graph’s network roles.", - "roles": { - "developer": { - "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" - }, - "indexer": { - "title": "Indexer", - "description": "Operate a node to index data and serve queries" - }, - "curator": { - "title": "Curator", - "description": "Organize data by signaling on subgraphs" - }, - "delegator": { - "title": "Delegator", - "description": "Secure the network by delegating GRT to Indexers" - } + "supportedNetworks": { + "title": "Supported Networks", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Read more", - "products": { - "title": "Products", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Create, manage and publish subgraphs and API keys" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Explore subgraphs and interact with the protocol" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 787b8f7e51a2af6e6f48406f127ba44a54d0de75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:33 -0500 Subject: [PATCH 0031/1534] New translations index.json (Dutch) --- website/src/pages/nl/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/nl/index.json b/website/src/pages/nl/index.json index 357c0bbf8d61..c6000a7b4c14 100644 --- a/website/src/pages/nl/index.json +++ b/website/src/pages/nl/index.json @@ -1,67 +1,99 @@ { - "title": "Begin", - "intro": "Leer over The Graph, een gedecentraliseerd protocol voor het indexen en opvragen van data op blockchains.", - "shortcuts": { - "aboutTheGraph": { - "title": "Over The Graph", - "description": "Leer meer over The Graph" + "title": "Start", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraphs", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Snelle Start", - "description": "Spring erin en begin met The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Ontwikkelaar FAQs", - "description": "Veel gestelde vragen" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Vraag van een Applicatie", - "description": "Leer te vragen van een applicatie" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Creëer een Subgraph", - "description": "Gebruik Studio om een subgraph te bouwen" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Netwerk Rollen", - "description": "Leer over The Graph's netwerk rollen.", - "roles": { - "developer": { - "title": "Ontwikkelaar", - "description": "Creëer een subgraph of gebruik bestaande subgraphs in een dapp" - }, - "indexer": { - "title": "Indexer", - "description": "Operate a node to index data and serve queries" - }, - "curator": { - "title": "Curator", - "description": "Organiseer gegevens door het signaleren op subgraphs" - }, - "delegator": { - "title": "Delegator", - "description": "Beveilig het netwerk door het delegeren van GRT naar Indexers" - } + "supportedNetworks": { + "title": "Ondersteunde Netwerken", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Lees meer", - "products": { - "title": "Producten", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Creëer, beheer en publiceer subgraphs en API-sleutels" - }, - "graphExplorer": { - "title": "Graph Verkenner", - "description": "Verken subgraphs en interacteer met het protocol" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Ondersteunde Netwerken", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 3d1094982a322427e7815ec547d577fc189b4e93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:34 -0500 Subject: [PATCH 0032/1534] New translations index.json (Polish) --- website/src/pages/pl/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/pl/index.json b/website/src/pages/pl/index.json index da068b79078f..8670eb1a59ae 100644 --- a/website/src/pages/pl/index.json +++ b/website/src/pages/pl/index.json @@ -1,67 +1,99 @@ { - "title": "Jak zacząć?", - "intro": "Dowiedz się więcej o The Graph - zdecentralizowanym protokole indeksującym dane sieci blockchain i umożliwiającym tworzenie zapytań.", - "shortcuts": { - "aboutTheGraph": { - "title": "Więcej o The Graph", - "description": "Dowiedz się więcej o The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgrafy", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": " Na start", - "description": "Wskakuj i zacznij z The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "FAQs dla developerów", - "description": "Najczęściej zadawane pytania" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Zapytania z aplikacji", - "description": "Dowiedz się jak tworzyć zapytania z aplikacji" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Jak stworzyć subgraf", - "description": "Użyj aplikacji \"Studio\" by stworzyć subgraf" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Role w sieci", - "description": "Dowiedz się więcej o rolach w sieci The Graph.", - "roles": { - "developer": { - "title": "Developer", - "description": "Stwórz subgraf lub użyj istniejącego subgrafa w zdecentralizowanej aplikacji (dApp)" - }, - "indexer": { - "title": "Indekser", - "description": "Indeksuj dane i obsługuj zapytania przez prowadzenie własnego node'a" - }, - "curator": { - "title": "Kurator", - "description": "Organizuj dane przez sygnalizowanie subgrafów" - }, - "delegator": { - "title": "Delegator", - "description": "Zabezpiecz sieć przez delegowanie tokenu GRT do wybranych indekserów" - } + "supportedNetworks": { + "title": "Wspierane sieci", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Dowiedz się więcej", - "products": { - "title": "Produkty", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Twórz, zarządzaj i publikuj subgrafy i klucze API" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Eksploruj subgrafy i zacznij korzystać z protokołu" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Wspierane sieci", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 6c869fe2d0b6c0af5a62f998ea3d8778a0fbecb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:35 -0500 Subject: [PATCH 0033/1534] New translations index.json (Portuguese) --- website/src/pages/pt/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/pt/index.json b/website/src/pages/pt/index.json index 9d8d3f9a08d4..5b3df70bebad 100644 --- a/website/src/pages/pt/index.json +++ b/website/src/pages/pt/index.json @@ -1,67 +1,99 @@ { - "title": "Como Começar", - "intro": "Aprenda sobre o The Graph, um protocolo descentralizado para a indexação e consulta de dados de blockchains.", - "shortcuts": { - "aboutTheGraph": { - "title": "Sobre o The Graph", - "description": "Aprenda mais sobre o The Graph" + "title": "Início", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Construa o seu primeiro subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraphs", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Começo Rápido", - "description": "Comece com o The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Perguntas Frequentes dos Programadores", - "description": "Perguntas frequentes" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Consulta de um Aplicativo", - "description": "Aprenda a consultar de um aplicativo" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Crie um Subgraph", - "description": "Use o Studio para criar subgraphs" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Papeis na Rede", - "description": "Aprenda sobre os papeis na rede do The Graph.", - "roles": { - "developer": { - "title": "Programador", - "description": "Crie um subgraph ou use subgraphs existentes em um dApp" - }, - "indexer": { - "title": "Indexador", - "description": "Opere um node para indexar dados e servir consultas" - }, - "curator": { - "title": "Curador", - "description": "Sinalize em subgraphs para organizar dados" - }, - "delegator": { - "title": "Delegante", - "description": "Delegue GRT para Indexadores e proteja a rede" - } + "supportedNetworks": { + "title": "Redes Apoiadas", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Leia mais", - "products": { - "title": "Produtos", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Crie, administre e publique subgraphs e chaves API" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Explore subgraphs e interaja com o protocolo" - } + "guides": { + "title": "Guias", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Etiqueta de Query", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Redes Apoiadas", - "description": "The Graph apoia as seguintes redes.", - "footer": "Para mais detalhes, veja a página {0}." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "O Que É Delegar?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+    }
+  },
+  "time": {
+    "reading": "Reading time",
+    "duration": "Duration",
+    "minutes": "min"
+  }
 }

From 7d389dfa5903ebeecef559596a14cb5a7bc47b56 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:43:36 -0500
Subject: [PATCH 0034/1534] New translations index.json (Russian)

---
 website/src/pages/ru/index.json | 140 ++++++++++++++++++++------------
 1 file changed, 86 insertions(+), 54 deletions(-)

diff --git a/website/src/pages/ru/index.json b/website/src/pages/ru/index.json
index 6f0d6145cb32..11e1eef7f22c 100644
--- a/website/src/pages/ru/index.json
+++ b/website/src/pages/ru/index.json
@@ -1,67 +1,99 @@
 {
-  "title": "Начнем",
-  "intro": "Начни изучение протокола The Graph как распределенного протокола для индексации и обслуживания запросов данных из блокчейнов.",
-  "shortcuts": {
-    "aboutTheGraph": {
-      "title": "О The Graph",
-      "description": "Узнай больше о протоколе The Graph"
+  "title": "Главная страница",
+  "hero": {
+    "title": "The Graph Docs",
+    "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.",
+    "cta1": "How The Graph works",
+    "cta2": "Build your first subgraph"
+  },
+  "products": {
+    "title": "The Graph’s Products",
+    "description": "Choose a solution that fits your needs—interact with blockchain data your way.",
+    "subgraphs": {
+      "title": "Субграфы",
+      "description": "Extract, process, and query blockchain data with open APIs.",
+      "cta": "Develop a subgraph"
     },
-    "quickStart": {
-      "title": "Быстрый старт",
-      "description": "Присоединяйтесь и начните работу с The Graph"
+    "substreams": {
+      "title": "Substreams",
+      "description": "Fetch and consume blockchain data with parallel execution.",
+      "cta": "Develop with Substreams"
     },
-    "developerFaqs": {
-      "title": "Часто задаваемые вопросы для разработчиков",
-      "description": "Часто задаваемые вопросы"
+    "sps": {
+      "title": "Substreams-Powered Subgraphs",
+      "description": "Boost your subgraph’s efficiency and scalability by using Substreams.",
+      "cta": "Set up a Substreams-powered subgraph"
     },
-    "queryFromAnApplication": {
-      "title": "Запрос из приложения",
-      "description": "Узнайте, как отправлять запросы из приложения"
+    "graphNode": {
+      "title": "Graph Node",
+      "description": "Index blockchain data and serve it via GraphQL queries.",
+      "cta": "Set up a local Graph Node"
     },
-    "createASubgraph": {
-      "title": "Создайте субграф",
-      "description": "Используйте Studio для создания субграфов"
+    "firehose": {
+      "title": "Firehose",
+      "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.",
+      "cta": "Get started with Firehose"
    }
   },
-  "networkRoles": {
-    "title": "Роли в сети",
-    "description": "Узнайте больше о ролях в сети The Graph.",
-    "roles": {
-      "developer": {
-        "title": "Разработчик",
-        "description": "Создайте подграф или используйте существующие подграфы в децентрализованном приложении"
-      },
-      "indexer": {
-        "title": "Индексатор",
-        "description": "Управляйте нодой для индексации данных и обслуживания запросов"
-      },
-      "curator": {
-        "title": "Куратор",
-        "description": "Организуйте данные, сигнализируя о подграфах"
-      },
-      "delegator": {
-        "title": "Делегатор",
-        "description": "Обеспечьте безопасность сети, делегировав GRT индексаторам"
-      }
+  "supportedNetworks": {
+    "title": "Поддерживаемые сети",
+    "description": {
+      "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Узнать больше", - "products": { - "title": "Продукты", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Создание, управление и публикация субграфов и ключей API" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Исследование подграфов и взаимодействие с протоколом" - } + "guides": { + "title": "Гайды", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Поддерживаемые сети", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 52c6be92b408eb0106d25019071b14cfdf8633dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:37 -0500 Subject: [PATCH 0035/1534] New translations index.json (Swedish) --- website/src/pages/sv/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/sv/index.json b/website/src/pages/sv/index.json index 9b056e8960c2..778f5e81d7f9 100644 --- a/website/src/pages/sv/index.json +++ b/website/src/pages/sv/index.json @@ -1,67 +1,99 @@ { - "title": "Komma igång", - "intro": "Lär dig om The Graph, ett decentraliserat protokoll för indexering och sökning av data från blockkedjor.", - "shortcuts": { - "aboutTheGraph": { - "title": "Om The Graph", - "description": "Läs mer om The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgrafer", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Snabbstart", - "description": "Hoppa in och börja med The Graph" + "substreams": { + "title": "Underströmmar", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Vanliga frågor för utvecklare", - "description": "Vanliga frågor" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Fråga från en applikation", - "description": "Lär dig att fråga från en applikation" + "graphNode": { + "title": "Graf Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Skapa en Subgraf", - "description": "Använd Studio för att skapa subgrafer" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Nätverks roller", - "description": "Lär dig om The Graph: s nätverks roller.", - "roles": { - "developer": { - "title": "Utvecklare", - "description": "Skapa en subgraf eller använd befintliga subgrafer i en dapp" - }, - "indexer": { - "title": "Indexerare", - "description": "Använd en nod för att indexera data och betjäna frågor" - }, - "curator": { - "title": "Kurator", - "description": "Organisera data genom att signalera på subgrafer" - }, - "delegator": { - "title": "Delegater", - "description": "Säkra nätverket genom att delegera GRT till indexerare" - } + "supportedNetworks": { + "title": "Nätverk som stöds", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Läs mer", - "products": { - "title": "Produkter", - "products": { - "subgraphStudio": { - "title": "Subgraf Studion", - "description": "Skapa, hantera och publicera subgrafer och API nycklar" - }, - "graphExplorer": { - "title": "Graf Utforskaren", - "description": "Utforska subgrafer och interagera med protokollet" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Bästa praxis för förfrågningar", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Nätverk som stöds", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 0170a4cd06fe325f6db1ac53bc8c68c18bb400a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:38 -0500 Subject: [PATCH 0036/1534] New translations index.json (Turkish) --- website/src/pages/tr/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/tr/index.json b/website/src/pages/tr/index.json index ef3649332398..de520639d6ee 100644 --- a/website/src/pages/tr/index.json +++ b/website/src/pages/tr/index.json @@ -1,67 +1,99 @@ { - "title": "Başlayalım", - "intro": "Blok zincirlerinden verileri indekslemek ve sorgulamak için merkeziyetsiz bir protokol olan Graph Protokol hakkında bilgi edinin.", - "shortcuts": { - "aboutTheGraph": { - "title": "Graph Hakkında", - "description": "Graph hakkında daha fazla bilgi edinin" + "title": "Ana sayfa", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraph'ler", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Hızlı Başlangıç", - "description": "Atlayın ve Graph ile başlayın" + "substreams": { + "title": "Substream'ler", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Geliştirici SSS", - "description": "Sıkça Sorulan Sorular" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Bir Uygulama Üzerinden Sorgulama", - "description": "Bir uygulama üzerinden sorgulamayı öğrenin" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Subgraph Oluştur", - "description": "Subgraph'ler oluşturmak için Studio'yu kullanın" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Ağ Rolleri", - "description": "Graph Protokol'ün ağ rolleri hakkında bilgi edinin.", - "roles": { - "developer": { - "title": "Geliştirici", - "description": "Bir subgraph oluşturun veya bir dApp'de mevcut subgraph'leri kullanın" - }, - "indexer": { - "title": "Dizin Oluşturucu", - "description": "Verileri endekslemek ve sorguları sunmak için bir node çalıştırın" - }, - "curator": { - "title": "Küratör", - "description": "Subgraph'lerde sinyal vererek verileri düzenleyin" - }, - "delegator": { - "title": "Yetkilendiren", - "description": "GRT'yi Dizin Oluşturuculara devrederek ağı güvenli hale getirin" - } + "supportedNetworks": { + "title": "Desteklenen Ağlar", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Daha fazlasını okuyun", - "products": { - "title": "Ürünler", - "products": { - "subgraphStudio": { - "title": "Subgraph Stüdyosu", - "description": "Subgraph'ler ve API anahtarları oluşturun, yönetin ve yayınlayın" - }, - "graphExplorer": { - "title": "Graph Gezgini", - "description": "Subgraph'leri keşfedin ve protokolle etkileşime girin" - } + "guides": { + "title": "Rehberler", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Desteklenen Ağlar", - "description": "The Graph aşağıdaki ağları destekler.", - "footer": "Daha fazla detay için {0} sayfasına bakın." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+    }
+  },
+  "time": {
+    "reading": "Reading time",
+    "duration": "Duration",
+    "minutes": "min"
+  }
 }

From 79a91b2e07ae6bc1f582567f1378a5a1817328b5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:43:39 -0500
Subject: [PATCH 0037/1534] New translations index.json (Ukrainian)

---
 website/src/pages/uk/index.json | 140 ++++++++++++++++++++------------
 1 file changed, 86 insertions(+), 54 deletions(-)

diff --git a/website/src/pages/uk/index.json b/website/src/pages/uk/index.json
index 8ecfe9f81e56..140fe66d0888 100644
--- a/website/src/pages/uk/index.json
+++ b/website/src/pages/uk/index.json
@@ -1,67 +1,99 @@
 {
-  "title": "Розпочати роботу",
-  "intro": "Дізнайтеся про Graph - децентралізований протокол для індексації та запитів даних з блокчейнів.",
-  "shortcuts": {
-    "aboutTheGraph": {
-      "title": "Про Graph",
-      "description": "Дізнайтеся більше про Graph"
+  "title": "Home",
+  "hero": {
+    "title": "The Graph Docs",
+    "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.",
+    "cta1": "How The Graph works",
+    "cta2": "Build your first subgraph"
+  },
+  "products": {
+    "title": "The Graph’s Products",
+    "description": "Choose a solution that fits your needs—interact with blockchain data your way.",
+    "subgraphs": {
+      "title": "Підграфи",
+      "description": "Extract, process, and query blockchain data with open APIs.",
+      "cta": "Develop a subgraph"
     },
-    "quickStart": {
-      "title": "Швидкий старт",
-      "description": "Приєднуйтесь і розпочніть спільно з Graph"
+    "substreams": {
+      "title": "Substreams",
+      "description": "Fetch and consume blockchain data with parallel execution.",
+      "cta": "Develop with Substreams"
     },
-    "developerFaqs": {
-      "title": "FAQ для розробників",
-      "description": "Поширені запитання"
+    "sps": {
+      "title": "Substreams-Powered Subgraphs",
+      "description": "Boost your subgraph’s efficiency and scalability by using Substreams.",
+      "cta": "Set up a Substreams-powered subgraph"
    },
-    "queryFromAnApplication": {
-      "title": "Запит з додатка",
-      "description": "Навчіться як робити запити з додатка"
+    "graphNode": {
+      "title": "Graph Node",
+      "description": "Index blockchain data and serve it via GraphQL queries.",
+      "cta": "Set up a local Graph Node"
    },
-    "createASubgraph": {
-      "title": "Створення субграфа",
-      "description": "Використання студії для створення субграфів"
+    "firehose": {
+      "title": "Firehose",
+      "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.",
+      "cta": "Get started with Firehose"
    }
   },
-  "networkRoles": {
-    "title": "Мережеві ролі",
-    "description": "Дізнайтеся про мережеві ролі в Graph.",
-    "roles": {
-      "developer": {
-        "title": "Розробник",
-        "description": "Створити субграф або використати існуючі субграфи в dapp"
-      },
-      "indexer": {
-        "title": "Індексер",
-        "description": "Управління нодою для індексування даних та обслуговування запитів"
-      },
-      "curator": {
-        "title": "Куратор",
-        "description": "Організувати дані за допомогою індикації на субграфах"
-      },
-      "delegator": {
-        "title": "Делегат",
-        "description": "Забезпечте безпеку мережі, делегувавши GRT індексаторам"
-      }
+  "supportedNetworks": {
+    "title": "Мережі, які підтримуються",
+    "description": {
+      "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Детальніше", - "products": { - "title": "Продукти", - "products": { - "subgraphStudio": { - "title": "Субграф Студія", - "description": "Створення, управління та публікація субграфів і ключів API" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "Дослідження субграфів та їх взаємодія з протоколом" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Найкращі практики виконання запитів", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Мережі, які підтримуються", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 8e0c932cdfb5f5603088441ec3749d652fbb7517 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:40 -0500 Subject: [PATCH 0038/1534] New translations index.json (Chinese Simplified) --- website/src/pages/zh/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/zh/index.json b/website/src/pages/zh/index.json index 7be9a7ca1935..63aa9d5d149a 100644 --- a/website/src/pages/zh/index.json +++ b/website/src/pages/zh/index.json @@ -1,67 +1,99 @@ { - "title": "开始", - "intro": "了解Graph,一个用于索引和查询区块链数据的去中心化的协议。", - "shortcuts": { - "aboutTheGraph": { - "title": "关于 Graph", - "description": "关于Graph的更多信息" + "title": "主页", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "子图", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "快速开始", - "description": "加入并从Graph 开始" + "substreams": { + "title": "子流", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "开发者常见问题", - "description": "常见问题" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "从应用程序查询", - "description": "学习从应用程序中查询" + "graphNode": { + "title": "Graph 节点", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "创建子图", - "description": "在子图工作室中创建子图" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "网络角色", - "description": "了解Graph的网络角色", - "roles": { - "developer": { - "title": "开发者", - "description": "创建子图或在去中心化应用中使用现有子图" - }, - "indexer": { - "title": "索引人", - "description": "通过节点索引数据并提供查询服务" - }, - "curator": { - "title": "策展人", - "description": "通过子图标识结果来组织数据" - }, - "delegator": { - "title": "委托者", - "description": "通过将 GRT 委托给索引人来维护网络" - } + "supportedNetworks": { + "title": "支持的网络", + "description": { + "base": "The Graph supports {0}. To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "了解更多", - "products": { - "title": "产品", - "products": { - "subgraphStudio": { - "title": "子图工作室", - "description": "创建、管理和发布子图和API密钥" - }, - "graphExplorer": { - "title": "Graph 浏览器", - "description": "探索子图并与协议互动" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." 
+ }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "查询最佳实践", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "支持的网络", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." + } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 7cd2e181533b6b535b31ba61039fbfb0a1bbfe02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:41 -0500 Subject: [PATCH 0039/1534] New translations index.json (Urdu (Pakistan)) --- website/src/pages/ur/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/ur/index.json b/website/src/pages/ur/index.json index 9065fe445b9a..b858bb6da45c 100644 --- a/website/src/pages/ur/index.json +++ b/website/src/pages/ur/index.json @@ -1,67 +1,99 @@ { - "title": "شروع کریں", - "intro": "گراف کے بارے میں جانیں، ایک ڈیسینٹرلائز پروٹوکول جو بلاک چینز سے ڈیٹا کو انڈیکس کرنے اور کیوری کرتا ہے.", - "shortcuts": { - "aboutTheGraph": { - "title": "گراف کے بارے میں", - "description": "گراف کے بارے میں مزید جانیں" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "سب گراف", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "فورا شروع کریں", - "description": "کود پڑیں اور گراف کے ساتھ شروع کریں" + "substreams": { + "title": "سب سٹریمز", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "ڈویلپر کے اکثر پوچھے گئے 
سوالات", - "description": "اکثر پوچھے گئے سوالات" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "ایپلیکیشن سے کیوری کریں", - "description": "ایپلیکیشن سے کیوری کرنا سیکھیں" + "graphNode": { + "title": "گراف نوڈ", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "سب گراف بنائیں", - "description": "سب گراف بنانے کے لیے سٹوڈیو کا استعمال کریں" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "نیٹ ورک کے کردار", - "description": "گراف کے نیٹ ورک کے کرداروں کے بارے میں جانیں.", - "roles": { - "developer": { - "title": "ڈویلپر", - "description": "ایک سب گراف بنائیں یا ڈیپ میں موجودہ سب گرافس استعمال کریں" - }, - "indexer": { - "title": "انڈیکسر", - "description": "ڈیٹا کو انڈیکس کرنے کے لیے ایک نوڈ چلائیں اور کیوریز بھیجیں" - }, - "curator": { - "title": "کیوریٹر", - "description": "سب گرافس پر سگنل دے کر ڈیٹا کو منظم کریں" - }, - "delegator": { - "title": "ڈیلیگیٹر", - "description": "GRT انڈیکسرز کو ڈیلیگیٹ کر کے نیٹ ورک کو محفوظ بنائیں" - } + "supportedNetworks": { + "title": "تعاون یافتہ نیٹ ورکس", + "description": { + "base": "The Graph supports {0}. To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "مزید پڑہیں", - "products": { - "title": "مصنوعات", - "products": { - "subgraphStudio": { - "title": "سب گراف سٹوڈیو", - "description": "سب گراف اور API کیز بنائیں، ان کو منظم کریں اور شائع کریں" - }, - "graphExplorer": { - "title": "گراف ایکسپلورر", - "description": "سب گرافس کو دریافت کریں اور پروٹوکول کے ساتھ ہم آہنگی کریں" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "بہترین طریقوں سے کیوری کرنا", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "تعاون یافتہ نیٹ ورکس", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." 
+ }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." + } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 29698ea1d5c6dc275db5891d528b7aa6a8f86d11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:42 -0500 Subject: [PATCH 0040/1534] New translations index.json (Vietnamese) --- website/src/pages/vi/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/vi/index.json b/website/src/pages/vi/index.json index 8d84655f79d8..1490c98de759 100644 --- a/website/src/pages/vi/index.json +++ b/website/src/pages/vi/index.json @@ -1,67 +1,99 @@ { - "title": "Bắt đầu", - "intro": "Tìm hiểu về The Graph một giao thức phi tập trung để lập chỉ mục và truy vấn dữ liệu từ các blockchain.", - "shortcuts": { - "aboutTheGraph": { - "title": "Về The Graph", - "description": "Tìm hiểu thêm về The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "Subgraphs", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "Bắt đầu nhanh", - "description": "Nhảy vào và bắt đầu với The Graph" + "substreams": { + "title": "Substreams", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "Câu hỏi thường gặp dành cho nhà phát triển", - "description": "Các câu hỏi thường gặp" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "Truy vấn từ một ứng dụng", - "description": "Học cách truy vấn từ một ứng dụng" + "graphNode": { + "title": "Graph Node", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "Tạo một Subgraph", - "description": "Sử dụng Studio để tạo các subgraph" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "Các vai trò trong mạng", - "description": "Tìm hiểu về các vai trò trong mạng The Graph.", - "roles": { - "developer": { - "title": "Nhà phát triển", - "description": "Tạo một subgraph hoặc sử dụng các subgraph hiện có trong một dapp" - }, - "indexer": { - "title": "Indexer", - "description": "Vận hành một nút để lập chỉ mục dữ liệu và phục vụ 
các truy vấn" - }, - "curator": { - "title": "Curator", - "description": "Tổ chức dữ liệu bằng cách báo hiệu trên các subgraph" - }, - "delegator": { - "title": "Delegator", - "description": "Bảo mật mạng bằng cách ủy quyền GRT cho Indexers" - } + "supportedNetworks": { + "title": "Mạng lưới được hỗ trợ", + "description": { + "base": "The Graph supports {0}. To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "Đọc thêm", - "products": { - "title": "Các sản phẩm", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Tạo, quản lý và xuất bản các subgraph và khóa API" - }, - "graphExplorer": { - "title": "Trình khám phá Graph", - "description": "Khám phá các subgraph và tương tác với giao thức" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Mạng lưới được hỗ trợ", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From ed2183c0fcdf7126485c061276369fc182370f11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:43 -0500 Subject: [PATCH 0041/1534] New translations index.json (Marathi) --- website/src/pages/mr/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/mr/index.json b/website/src/pages/mr/index.json index 5d50b71571c4..3bd097a42eef 100644 --- a/website/src/pages/mr/index.json +++ b/website/src/pages/mr/index.json @@ -1,67 +1,99 @@ { - "title": "सुरु करूया", - "intro": "द ग्राफ बद्दल जाणून घ्या, ब्लॉकचेन वरून डेटा अनुक्रमित करण्यासाठी आणि क्वेरी करण्यासाठी विकेंद्रित प्रोटोकॉल.", - "shortcuts": { - "aboutTheGraph": { - "title": "ग्राफ बद्दल", - "description": "Learn more about The Graph" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "सबग्राफ", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "क्विक स्टार्ट", - "description": "मध्ये जा आणि आलेख सह प्रारंभ करा" + "substreams": { + "title": "उपप्रवाह", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "विकसक वारंवार विचारले जाणारे प्रश्न", - "description": "सतत विचारले जाणारे प्रश्न" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "अर्जावरून क्वेरी", - "description": "अर्जावरून क्वेरी करायला शिका" + "graphNode": { + "title": "आलेख नोड", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "सबग्राफ तयार करा", - "description": "सबग्राफ तयार करण्यासाठी स्टुडिओ वापरा" + "firehose": { + "title": "फायरहोस", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "नेटवर्क भूमिका", - "description": "ग्राफच्या नेटवर्क भूमिकांबद्दल जाणून घ्या.", - "roles": { - "developer": { - "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" - }, - "indexer": { - "title": "Indexer", - "description": "डेटा इंडेक्स करण्यासाठी नोड ऑपरेट करा आणि क्वेरी सर्व्ह करा" - }, - "curator": { - "title": "Curator", - "description": "Organize data by signaling on subgraphs" - }, - "delegator": { - "title": "अधिकारप्राप्त कर्ता", - "description": "Secure the network by delegating GRT to Indexers" - } + "supportedNetworks": { + "title": "Supported Networks", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "पुढे वाचा", - "products": { - "title": "Products", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "सबग्राफ आणि API की तयार करा, व्यवस्थापित करा आणि प्रकाशित करा" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "सबग्राफ एक्सप्लोर करा आणि प्रोटोकॉलशी संवाद साधा" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "Querying Best Practices", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks.", - "footer": "For more details, see the {0} page." + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." 
+ } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From c985fe8b955ecac7d86c97fa23bf615f6c9fe347 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:44 -0500 Subject: [PATCH 0042/1534] New translations index.json (Hindi) --- website/src/pages/hi/index.json | 140 ++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 54 deletions(-) diff --git a/website/src/pages/hi/index.json b/website/src/pages/hi/index.json index 8b2ce2fa9a68..f50c21715290 100644 --- a/website/src/pages/hi/index.json +++ b/website/src/pages/hi/index.json @@ -1,67 +1,99 @@ { - "title": "शुरू हो जाओ", - "intro": "द ग्राफ के बारे में जानें, ब्लॉकचेन से डेटा को अनुक्रमित करने और क्वेरी करने के लिए एक विकेन्द्रीकृत प्रोटोकॉल।", - "shortcuts": { - "aboutTheGraph": { - "title": "ग्राफ के बारे में", - "description": "ग्राफ़ के बारे में और जानें" + "title": "Home", + "hero": { + "title": "The Graph Docs", + "description": "Kick-start your web3 project with the tools to extract, transform, and load blockchain data.", + "cta1": "How The Graph works", + "cta2": "Build your first subgraph" + }, + "products": { + "title": "The Graph’s Products", + "description": "Choose a solution that fits your needs—interact with blockchain data your way.", + "subgraphs": { + "title": "सबग्राफ", + "description": "Extract, process, and query blockchain data with open APIs.", + "cta": "Develop a subgraph" }, - "quickStart": { - "title": "जल्दी शुरू", - "description": "कूदो और ग्राफ के साथ शुरू करो" + "substreams": { + "title": "सबस्ट्रीम", + "description": "Fetch and consume blockchain data with parallel execution.", + "cta": "Develop with Substreams" }, - "developerFaqs": { - "title": "डेवलपर अक्सर पूछे जाने वाले प्रश्न", - "description": "अक्सर पूछे जाने वाले प्रश्न" + "sps": { + "title": "Substreams-Powered Subgraphs", + "description": "Boost your subgraph’s efficiency and scalability by using Substreams.", + "cta": "Set up a Substreams-powered subgraph" }, - "queryFromAnApplication": { - "title": "एक एप्लिकेशन से पूछे गए प्रश्न", - "description": "एक एप्लिकेशन से पूछे जाने वाले प्रश्न सीखें।" + "graphNode": { + "title": "ग्राफ-नोड", + "description": "Index blockchain data and serve it via GraphQL queries.", + "cta": "Set up a local Graph Node" }, - "createASubgraph": { - "title": "एक सबग्राफ बनाएं", - "description": "सबग्राफ बनाने के लिए स्टूडियो का प्रयोग करें" + "firehose": { + "title": "Firehose", + "description": "Extract blockchain data into flat files to enhance sync times and streaming capabilities.", + "cta": "Get started with Firehose" } }, - "networkRoles": { - "title": "नेटवर्क भूमिकाएँ", - "description": "ग्राफ़ की नेटवर्क भूमिकाओं के बारे में जानें।", - "roles": { - "developer": { - "title": "डेवलपर", - "description": "एक subgraph बनाएं या dapp में मौजूदा subgraph का उपयोग करें" - }, - "indexer": { - "title": "Indexer", - "description": "कोई नोड संचालित करें ताकि डेटा को इंडेक्स किया जा सके और queries को सर्व किया जा सके" - }, - "curator": { - "title": "Curator", - "description": "डेटा को 'signaling' द्वारा subgraphs पर व्यवस्थित करें" - }, - "delegator": { - "title": "Delegator", - "description": "नेटवर्क को सुरक्षित करें GRT को Indexers को डेलीगेट करके" - } + "supportedNetworks": { + "title": "समर्थित नेटवर्क", + "description": { + "base": "The Graph supports {0}. 
To add a new network, {1}", + "networks": "networks", + "completeThisForm": "complete this form" } }, - "readMore": "और जानें", - "products": { - "title": "उत्पादों", - "products": { - "subgraphStudio": { - "title": "Subgraph Studio", - "description": "सबग्राफ और एपीआई कुंजी बनाएं, प्रबंधित करें और प्रकाशित करें" - }, - "graphExplorer": { - "title": "Graph Explorer", - "description": "सबग्राफ़ और प्रोटोकॉल के साथ इंटरैक्ट करें" - } + "guides": { + "title": "Guides", + "description": "", + "explorer": { + "title": "Find Data in Graph Explorer", + "description": "Leverage hundreds of public subgraphs for existing blockchain data." + }, + "publishASubgraph": { + "title": "Publish a Subgraph", + "description": "Add your subgraph to the decentralized network." + }, + "publishSubstreams": { + "title": "Publish Substreams", + "description": "Launch your Substreams package to the Substreams Registry." + }, + "queryingBestPractices": { + "title": "सर्वोत्तम प्रथाओं को क्वेरी करना", + "description": "Optimize your subgraph queries for faster, better results." + }, + "timeseries": { + "title": "Optimized Timeseries & Aggregations", + "description": "Streamline your subgraph for efficiency." + }, + "apiKeyManagement": { + "title": "API Key Management", + "description": "Easily create, manage, and secure API keys for your subgraphs." + }, + "transferToTheGraph": { + "title": "Transfer to The Graph", + "description": "Seamlessly upgrade your subgraph from any platform." } }, - "supportedNetworks": { - "title": "समर्थित नेटवर्क", - "description": "The Graph निम्नलिखित नेटवर्क का समर्थन करता है।", - "footer": "अधिक जानकारी के लिए, {0} पेज देखें।" + "videos": { + "title": "Video Tutorials", + "watchOnYouTube": "Watch on YouTube", + "theGraphExplained": { + "title": "The Graph Explained In 1 Minute", + "description": "What is The Graph? How does it work? Why does it matter so much to web3 developers? Learn how and why The Graph is the backbone of web3 in this short, non-technical video." + }, + "whatIsDelegating": { + "title": "What is Delegating?", + "description": "Delegators are key participants who help secure The Graph by staking their GRT tokens to Indexers. This video explains key concepts to understand before delegating." + }, + "howToIndexSolana": { + "title": "How to Index Solana with a Substreams-powered Subgraph", + "description": "If you’re familiar with subgraphs, discover how Substreams offer a different approach for key use cases. This video walks you through the process of building your first Substreams-powered subgraph." + } + }, + "time": { + "reading": "Reading time", + "duration": "Duration", + "minutes": "min" } } From 7b59ac49040bd1fcdd2593eb2b1af4fb74dd1d8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:43:57 -0500 Subject: [PATCH 0043/1534] New translations about.mdx (French) --- website/src/pages/fr/about.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/fr/about.mdx b/website/src/pages/fr/about.mdx index 8557e5d8f522..0740a57e71c5 100644 --- a/website/src/pages/fr/about.mdx +++ b/website/src/pages/fr/about.mdx @@ -24,7 +24,7 @@ Dans le cas de l'exemple mentionné ci-dessus, Bored Ape Yacht Club, vous pouvez Il faudrait des **heures, voire des jours,** pour qu'une application décentralisée (dapp) fonctionnant dans un navigateur obtienne une réponse à ces questions simples. 
-Une alternative serait de configurer votre propre serveur, de traiter les transactions, de les stocker dans une base de données et de créer une API pour interroger les données. Cependant, cette solution est [coûteuse en ressources](/resources/benefits/), nécessite une maintenance constante, présente un point de défaillance unique et compromet d'importantes propriétés de securité essentiels à la décentralisation. +Alternatively, you have the option to set up your own server, process the transactions, store them in a database, and create an API endpoint to query the data. However, this option is [resource intensive](/resources/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. Les spécificités de la blockchain, comme la finalité des transactions, les réorganisations de chaîne et les blocs oncles (blocs rejetés lorsque deux blocs sont créés simultanément, ce qui entraîne l'omission d'un bloc de la blockchain.), ajoutent de la complexité au processus, rendant longue et conceptuellement difficile la récupération de résultats précis à partir des données de la blockchain. From b5620eb6cfd24a68d5eaaa11ac9463854cce8f20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:03 -0500 Subject: [PATCH 0044/1534] New translations about.mdx (Portuguese) --- website/src/pages/pt/about.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/about.mdx b/website/src/pages/pt/about.mdx index 1648640230f7..6603713efd91 100644 --- a/website/src/pages/pt/about.mdx +++ b/website/src/pages/pt/about.mdx @@ -24,7 +24,7 @@ No caso do exemplo listado acima, o Bored Ape Yacht Club, é possível realizar Levariam **horas, ou até mesmo dias**, para que um aplicativo descentralizado (dApp) executado em um navegador conseguisse uma resposta a estas questões simples. -Como alternativa, haveria a opção de construir o seu próprio servidor, processar as transações, salvá-las num banco de dados, e construir um endpoint de API sobre tudo isso tudo para poder fazer o query dos dados. Porém, esta opção [consome muitos recursos](/resources/benefits/), precisa de manutenção, apresenta um único ponto de falha, e quebra propriedades de segurança importantes obrigatórias para a descentralização. +Alternatively, you have the option to set up your own server, process the transactions, store them in a database, and create an API endpoint to query the data. However, this option is [resource intensive](/resources/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. Propriedades de blockchain, como finalidade, reorganizações de chain, ou blocos uncle, complicam ainda mais este processo, e não apenas o tornam longo e cansativo, mas dificultam conceitualmente a retirada de resultados precisos de queries dos dados da blockchain. 
From eed2e95ac81abd29ce6b55eeec8db9050e0d6e95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:04 -0500 Subject: [PATCH 0045/1534] New translations about.mdx (Russian) --- website/src/pages/ru/about.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/about.mdx b/website/src/pages/ru/about.mdx index 6aa90fdbc628..35f9c6efd933 100644 --- a/website/src/pages/ru/about.mdx +++ b/website/src/pages/ru/about.mdx @@ -24,7 +24,7 @@ The Graph — это мощный децентрализованный прот Децентрализованному приложению (dapp), запущенному в браузере, потребуются **часы или даже дни**, чтобы получить ответ на эти простые вопросы. -В качестве альтернативы у Вас есть возможность настроить собственный сервер, обрабатывать транзакции, хранить их в базе данных и создать конечную точку API для запроса данных. Однако этот вариант [ресурсоемок](/resources/benefits/), требует обслуживания, представляет собой единственную возможную причину сбоя и нарушает важные свойства безопасности, необходимые для децентрализации. +Alternatively, you have the option to set up your own server, process the transactions, store them in a database, and create an API endpoint to query the data. However, this option is [resource intensive](/resources/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. Такие свойства блокчейна, как окончательность, реорганизация чейна и необработанные блоки, усложняют процесс, делая получение точных результатов запроса из данных блокчейна трудоемким и концептуально сложным. From f2c03c94c908f9901e133838ad93bbdf0cab8316 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:06 -0500 Subject: [PATCH 0046/1534] New translations about.mdx (Turkish) --- website/src/pages/tr/about.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/about.mdx b/website/src/pages/tr/about.mdx index 9c1841c92789..775696c41265 100644 --- a/website/src/pages/tr/about.mdx +++ b/website/src/pages/tr/about.mdx @@ -24,7 +24,7 @@ Yukarıda bahsedilen Bored Ape Yacht Club örneğinde, [sözleşmenin](https://e Bu basit sorulara yanıt almak, tarayıcıda çalışan bir merkeziyetsiz uygulama (dapp) için **saatler, hatta günler ** sürebilir. -Alternatif olarak, kendi sunucunuzu kurup işlemleri işleyebilir, bunları bir veri tabanında depolayabilir ve veriyi sorgulamak için bir API uç noktası oluşturabilirsiniz. Ancak, bu seçenek [kaynak açısından maliyetli](/resources/benefits/) olup sürekli bakım gerektirir, tek bir arıza noktası oluşturur ve merkeziyetsizliğin gerektirdiği önemli güvenlik özelliklerini ortadan kaldırır. +Alternatively, you have the option to set up your own server, process the transactions, store them in a database, and create an API endpoint to query the data. However, this option is [resource intensive](/resources/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. Finalite, zincir yeniden organizasyonu ve "uncle" bloklar gibi blokzinciri özellikleri, sürece karmaşıklık katar ve doğru sorgu sonuçlarını blokzinciri verilerinden elde etmeyi zaman alıcı hale getirip kavramsal olarak zorlaştırır. 
From 736180191c2bd1d010376f96af6c0f7a81106519 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:10 -0500 Subject: [PATCH 0047/1534] New translations about.mdx (Hindi) --- website/src/pages/hi/about.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/hi/about.mdx b/website/src/pages/hi/about.mdx index 4676fe4c295f..6dc30fe86bb9 100644 --- a/website/src/pages/hi/about.mdx +++ b/website/src/pages/hi/about.mdx @@ -2,7 +2,7 @@ title: The Graph के बारे में --- -## What is The Graph? +## The Graph क्या है? The Graph एक शक्तिशाली विकेंद्रीकृत प्रोटोकॉल है जो ब्लॉकचेन डेटा को आसानी से क्वेरी और इंडेक्स करने में सक्षम बनाता है। यह ब्लॉकचेन डेटा को क्वेरी करने की जटिल प्रक्रिया को सरल बनाता है, जिससे डैप विकास तेज और आसान हो जाता है। @@ -24,11 +24,11 @@ The Graph एक शक्तिशाली विकेंद्रीकृ यह सरल सवालों का जवाब पाने में एक ब्राउज़र में चल रही एक विकेन्द्रीकृत एप्लिकेशन (dapp) को **घंटे या यहाँ तक कि दिन** लग सकते हैं। -वैकल्पिक रूप से, आपके पास अपना स्वयं का सर्वर सेट अप करने, लेनदेन को प्रोसेस करने, उन्हें एक डेटाबेस में स्टोर करने, और डेटा को क्वेरी करने के लिए एक API एन्डपॉइंट बनाने का विकल्प है। हालांकि, यह विकल्प [संसाधन-संवेदनशील](/resources/benefits/) है, रखरखाव की आवश्यकता होती है, एकल विफलता बिंदु प्रस्तुत करता है, और विकेंद्रीकरण के लिए आवश्यक महत्वपूर्ण सुरक्षा गुणों को तोड़ता है। +Alternatively, you have the option to set up your own server, process the transactions, store them in a database, and create an API endpoint to query the data. However, this option is [resource intensive](/resources/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. ब्लॉकचेन की विशेषताएँ, जैसे अंतिमता, चेन पुनर्गठन, और अंकल ब्लॉक्स, प्रक्रिया में जटिलता जोड़ती हैं, जिससे ब्लॉकचेन डेटा से सटीक क्वेरी परिणाम प्राप्त करना समय लेने वाला और अवधारणात्मक रूप से चुनौतीपूर्ण हो जाता है। -## The Graph एक समाधान प्रदान करता है +## The Graph एक समाधान प्रदान करता है The Graph इस चुनौती को एक विकेन्द्रीकृत प्रोटोकॉल के माध्यम से हल करता है जो ब्लॉकचेन डेटा को इंडेक्स करता है और उसकी कुशल और उच्च-प्रदर्शन वाली क्वेरी करने की सुविधा प्रदान करता है। ये एपीआई (इंडेक्स किए गए "सबग्राफ") फिर एक मानक GraphQL एपीआई के साथ क्वेरी की जा सकती हैं। From a864429e2c11cad2f0c4d2a1a734b247f20e1104 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:11 -0500 Subject: [PATCH 0048/1534] New translations starting-your-subgraph.mdx (Romanian) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ro/subgraphs/developing/creating/starting-your-subgraph.mdx index f463ca3e9507..4823231d9a40 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. 
[Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From d732ad2351372ccdb1b74d2b91ac4d95ecb221c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:12 -0500 Subject: [PATCH 0049/1534] New translations starting-your-subgraph.mdx (French) --- .../creating/starting-your-subgraph.mdx | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/fr/subgraphs/developing/creating/starting-your-subgraph.mdx index 6a7795812160..4030093310a4 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -1,21 +1,23 @@ --- -title: Starting Your Subgraph +title: Démarrer votre subgraph --- ## Aperçu -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph contient des milliers de subgraphs déjà disponibles pour des requêtes. Consultez [The Graph Explorer](https://thegraph.com/explorer) et trouvez-en un qui correspond déjà à vos besoins. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +Lorsque vous créez un [subgraph](/subgraphs/developing/subgraphs/), vous créez une API ouverte personnalisée qui extrait des données d'une blockchain, les traite, les stocke et les rend faciles à interroger via GraphQL. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Le développement de subgraphs peut aller de simples modèles « scaffold » à des subgraphs avancés, spécialement adaptés à vos besoins. -### Start Building +### Commencez à développer -Start the process and build a subgraph that matches your needs: +Lancez le processus et construisez un subgraph qui correspond à vos besoins : -1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema -4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +1. [Installer la CLI](/subgraphs/developing/creating/install-the-cli/) - Configurez votre infrastructure +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Comprenez le composant clé d'un subgraph +3. [Le schéma GraphQL](/subgraphs/developing/creating/ql-schema/) - Écrivez votre schéma +4. [Écrire les mappings AssemblyScript](/subgraphs/developing/creating/assemblyscript-mappings/) - Rédigez vos mappings +5. 
[Fonctionnalités avancées](/subgraphs/developing/creating/advanced/) - Personnalisez votre subgraphs avec des fonctionnalités avancées + +Explorez d'autres [ressources pour les API](/subgraphs/developing/creating/graph-ts/README/) et effectuez des tests en local avec [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 3d8a8cf3f232a8dc8092b621cceeab27a80ff30f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:13 -0500 Subject: [PATCH 0050/1534] New translations starting-your-subgraph.mdx (Spanish) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/es/subgraphs/developing/creating/starting-your-subgraph.mdx index c0d57cc7fdbe..76ff7db16bba 100644 --- a/website/src/pages/es/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 9b5dd85cb6624f5232f06bb925bfa139f6263fe3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:14 -0500 Subject: [PATCH 0051/1534] New translations starting-your-subgraph.mdx (Arabic) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ar/subgraphs/developing/creating/starting-your-subgraph.mdx index 37c6b4029005..8f2e787688c2 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. 
[Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 2455093b3013e3e2dc6d17bcc88914e0648ae615 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:15 -0500 Subject: [PATCH 0052/1534] New translations starting-your-subgraph.mdx (Czech) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/cs/subgraphs/developing/creating/starting-your-subgraph.mdx index ebfaefc06b4f..436b407a19ba 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From d1dbf37544010abce110df18ef3d69b2c9a7fd27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:16 -0500 Subject: [PATCH 0053/1534] New translations starting-your-subgraph.mdx (German) --- .../creating/starting-your-subgraph.mdx | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/de/subgraphs/developing/creating/starting-your-subgraph.mdx index f463ca3e9507..dbffb92cfc5e 100644 --- a/website/src/pages/de/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -1,21 +1,23 @@ --- -title: Starting Your Subgraph +title: Starten Ihres Subgraphen --- -## Overview +## Überblick -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +The Graph beherbergt Tausende von Subgraphen, die bereits für Abfragen zur Verfügung stehen. Schauen Sie also in [The Graph Explorer] (https://thegraph.com/explorer) nach und finden Sie einen, der Ihren Anforderungen entspricht. -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. 
+Wenn Sie einen [Subgraphen](/subgraphs/developing/subgraphs/) erstellen, erstellen Sie eine benutzerdefinierte offene API, die Daten aus einer Blockchain extrahiert, verarbeitet, speichert und über GraphQL einfach abfragen lässt. -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. +Die Entwicklung von Subgraphen reicht von einfachen Gerüst-Subgraphen bis hin zu fortgeschrittenen, speziell zugeschnittenen Subgraphen. -### Start Building +### Start des Erstellens -Start the process and build a subgraph that matches your needs: +Starten Sie den Prozess und erstellen Sie einen Subgraphen, der Ihren Anforderungen entspricht: -1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure -2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema -4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features +1. [Installieren der CLI](/subgraphs/developing/creating/install-the-cli/) - Richten Sie Ihre Infrastruktur ein +2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Verstehen der wichtigsten Komponenten eines Subgraphen +3. [Das GraphQL-Schema](/subgraphs/developing/creating/ql-schema/) - Schreiben Sie Ihr Schema +4. [Schreiben von AssemblyScript-Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Schreiben Sie Ihre Mappings +5. [Erweiterte Funktionen](/subgraphs/developing/creating/advanced/) - Passen Sie Ihren Subgraph mit erweiterten Funktionen an + +Erkunden Sie zusätzliche [Ressourcen für APIs](/subgraphs/developing/creating/graph-ts/README/) und führen Sie lokale Tests mit [Matchstick](/subgraphs/developing/creating/unit-testing-framework/) durch. From 4d6cea3f1891f0ae4a2efbb85b3cd02b73cbfd50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:17 -0500 Subject: [PATCH 0054/1534] New translations starting-your-subgraph.mdx (Italian) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/it/subgraphs/developing/creating/starting-your-subgraph.mdx index c448c25bf01f..6b6247b0ce50 100644 --- a/website/src/pages/it/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. 
[Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 39c4c1142712dd3db084f2c2ef2831beab9d3044 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:18 -0500 Subject: [PATCH 0055/1534] New translations starting-your-subgraph.mdx (Japanese) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ja/subgraphs/developing/creating/starting-your-subgraph.mdx index 49828585e1e2..c2dcb7ad1d68 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 8a84190461ab03db616fa006f76afcf827fd4ba2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:19 -0500 Subject: [PATCH 0056/1534] New translations starting-your-subgraph.mdx (Korean) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ko/subgraphs/developing/creating/starting-your-subgraph.mdx index f463ca3e9507..4823231d9a40 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). 
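The starting-your-subgraph checklist repeated in these patches includes writing AssemblyScript mappings (step 4). For orientation, a minimal sketch of such a handler follows (AssemblyScript is a strict subset of TypeScript); the `Transfer` event, the `Token` entity, and the `generated/` import paths are hypothetical stand-ins for what `graph codegen` would emit from your own manifest and schema:

```typescript
// Assumed (hypothetical) schema.graphql behind this sketch:
//   type Token @entity {
//     id: ID!
//     owner: Bytes
//   }
import { Transfer } from "../generated/ExampleToken/ExampleToken";
import { Token } from "../generated/schema";

export function handleTransfer(event: Transfer): void {
  // Entities are keyed by string IDs; the transaction hash is a common choice.
  let id = event.transaction.hash.toHex();
  let token = Token.load(id);
  if (token == null) {
    token = new Token(id);
  }
  token.owner = event.params.to;
  token.save();
}
```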
From 44b053794bcf196d0e11ea4f18e22e52bdf1019d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:20 -0500 Subject: [PATCH 0057/1534] New translations starting-your-subgraph.mdx (Dutch) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/nl/subgraphs/developing/creating/starting-your-subgraph.mdx index f463ca3e9507..4823231d9a40 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From b50ae8e8570f2d2df52e3b65abd389d37474c885 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:21 -0500 Subject: [PATCH 0058/1534] New translations starting-your-subgraph.mdx (Polish) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/pl/subgraphs/developing/creating/starting-your-subgraph.mdx index f463ca3e9507..4823231d9a40 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). 
From 83fde34844d13bb0246bc54d23c08d8a1e3029e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:22 -0500 Subject: [PATCH 0059/1534] New translations starting-your-subgraph.mdx (Portuguese) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/pt/subgraphs/developing/creating/starting-your-subgraph.mdx index 46190164b8a5..1b70a2ec98ad 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 5ec2fb8f6bd55b7ecffc7f2233a61dc41ebad07d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:23 -0500 Subject: [PATCH 0060/1534] New translations starting-your-subgraph.mdx (Russian) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ru/subgraphs/developing/creating/starting-your-subgraph.mdx index 380c6a5a9314..8136fb559cff 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). 
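The same checklist closes by pointing at Matchstick for local testing. A minimal sketch of a unit test for an entity like the hypothetical `Token` above; the assertion helpers shown (`assert.entityCount`, `assert.fieldEquals`, `clearStore`) exist in recent `matchstick-as` releases, but verify them against the version pinned in your project:

```typescript
import { assert, clearStore, describe, test } from "matchstick-as/assembly/index";
import { Token } from "../generated/schema"; // hypothetical generated entity

describe("Token entity", () => {
  test("can be stored and asserted on", () => {
    let token = new Token("0xabc");
    token.save();

    assert.entityCount("Token", 1);
    assert.fieldEquals("Token", "0xabc", "id", "0xabc");

    clearStore(); // keep each test isolated from the mock store
  });
});
```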
From 791f9ffe7bd809cb20d75dcfcd4ea797d0a00e05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:23 -0500 Subject: [PATCH 0061/1534] New translations starting-your-subgraph.mdx (Swedish) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/sv/subgraphs/developing/creating/starting-your-subgraph.mdx index 04fe67d27470..9f06ce8fcd1d 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 82f1865403a45ca7529ac74f5cd612f7850ea1a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:24 -0500 Subject: [PATCH 0062/1534] New translations starting-your-subgraph.mdx (Turkish) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/tr/subgraphs/developing/creating/starting-your-subgraph.mdx index 7367bf775d28..c10f6facbb0d 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). 
From dcaf761476a7edc5c6bda678d1c02ea91f1215e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:25 -0500 Subject: [PATCH 0063/1534] New translations starting-your-subgraph.mdx (Ukrainian) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/uk/subgraphs/developing/creating/starting-your-subgraph.mdx index f463ca3e9507..4823231d9a40 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 59a568c4d2ff42482bf60e2000b96c22794c596e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:26 -0500 Subject: [PATCH 0064/1534] New translations starting-your-subgraph.mdx (Chinese Simplified) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/zh/subgraphs/developing/creating/starting-your-subgraph.mdx index fdef40db3f8a..d00c872abc59 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). 
From e9778ba83a98deea750c1c653788af4dd69c2c3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:27 -0500 Subject: [PATCH 0065/1534] New translations starting-your-subgraph.mdx (Urdu (Pakistan)) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/ur/subgraphs/developing/creating/starting-your-subgraph.mdx index 455cb2e3a413..3f0d9b8cde40 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 8363749d7238da02ac8f0f3d4bfa583db2f4c268 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:28 -0500 Subject: [PATCH 0066/1534] New translations starting-your-subgraph.mdx (Vietnamese) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/vi/subgraphs/developing/creating/starting-your-subgraph.mdx index faa0cc958d66..f7427e79c81a 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). 
From 56206762fbf28b1fcf5ef23e299ee89a326eeefe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:29 -0500 Subject: [PATCH 0067/1534] New translations starting-your-subgraph.mdx (Marathi) --- .../subgraphs/developing/creating/starting-your-subgraph.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/mr/subgraphs/developing/creating/starting-your-subgraph.mdx index 31806369cd19..946093ef308b 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. [Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 048261a1a0c762a31c7208274b71cd80f238e8ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:30 -0500 Subject: [PATCH 0068/1534] New translations starting-your-subgraph.mdx (Hindi) --- .../developing/creating/starting-your-subgraph.mdx | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/starting-your-subgraph.mdx b/website/src/pages/hi/subgraphs/developing/creating/starting-your-subgraph.mdx index de24bff19b47..a162f802cf9c 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/starting-your-subgraph.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/starting-your-subgraph.mdx @@ -4,9 +4,9 @@ title: Starting Your Subgraph ## अवलोकन -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. +ग्राफ़ में पहले से ही हजारों सबग्राफ उपलब्ध हैं, जिन्हें क्वेरी के लिए उपयोग किया जा सकता है, तो The Graph Explorer(https://thegraph.com/explorer) को चेक करें और ऐसा कोई Subgraph ढूंढें जो पहले से आपकी ज़रूरतों से मेल खाता हो। -When you create a [subgraph](/subgraphs/developing/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. +जब आप एक [सबग्राफ](/subgraphs/developing/subgraphs/)बनाते हैं, तो आप एक कस्टम ओपन API बनाते हैं जो ब्लॉकचेन से डेटा निकालता है, उसे प्रोसेस करता है, स्टोर करता है और इसे GraphQL के माध्यम से क्वेरी करना आसान बनाता है। Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. @@ -16,6 +16,8 @@ Start the process and build a subgraph that matches your needs: 1. [Install the CLI](/subgraphs/developing/creating/install-the-cli/) - Set up your infrastructure 2. 
[Subgraph Manifest](/subgraphs/developing/creating/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema +3. [The GraphQL Schema](/subgraphs/developing/creating/ql-schema/) - Write your schema 4. [Writing AssemblyScript Mappings](/subgraphs/developing/creating/assemblyscript-mappings/) - Write your mappings 5. [Advanced Features](/subgraphs/developing/creating/advanced/) - Customize your subgraph with advanced features + +Explore additional [resources for APIs](/subgraphs/developing/creating/graph-ts/README/) and conduct local testing with [Matchstick](/subgraphs/developing/creating/unit-testing-framework/). From 30dd7c4d48252d6c4358354cae9681a52c79dfd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:31 -0500 Subject: [PATCH 0069/1534] New translations overview.mdx (Romanian) --- website/src/pages/ro/indexing/overview.mdx | 60 +++++++++++----------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/website/src/pages/ro/indexing/overview.mdx b/website/src/pages/ro/indexing/overview.mdx index bf51dec0b32b..3f9b35378f86 100644 --- a/website/src/pages/ro/indexing/overview.mdx +++ b/website/src/pages/ro/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexing +title: Indexing Overview +sidebarTitle: Overview --- Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. @@ -59,8 +60,7 @@ query indexerAllocations { Use Etherscan to call `getRewards()`: - Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* To call `getRewards()`: +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - Enter the **allocationID** in the input. - Click the **Query** button. @@ -110,12 +110,12 @@ Indexers may differentiate themselves by applying advanced techniques for making - **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. - **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -135,7 +135,7 @@ At the center of an Indexer's infrastructure is the Graph Node which monitors th - **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,26 +147,26 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - |
| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - |
| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - |
| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - |
| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | ### Setup server infrastructure using Terraform on Google Cloud @@ -256,7 +256,7 @@ indexer= cat > terraform.tfvars < Date: Fri, 14 Feb 2025 12:44:33 -0500 Subject: [PATCH 0070/1534] New translations overview.mdx (French) --- website/src/pages/fr/indexing/overview.mdx | 497 ++++++++++----------- 1 file changed, 248 insertions(+), 249 deletions(-) diff --git a/website/src/pages/fr/indexing/overview.mdx b/website/src/pages/fr/indexing/overview.mdx index fe832149f3bf..e08b0f57831e 100644 --- a/website/src/pages/fr/indexing/overview.mdx +++ b/website/src/pages/fr/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexage +title: Vue d’ensemble de l’indexation +sidebarTitle: Aperçu --- Les indexeurs sont des opérateurs de nœuds dans The Graph Network qui mettent en jeu des jetons Graph (GRT) afin de fournir des services d'indexation et de traitement de requêtes. Les indexeurs perçoivent des frais de requête et des récompenses d'indexation pour leurs services. Ils perçoivent également des frais de requête qui sont réduits selon une fonction de remise exponentielle. @@ -8,39 +9,39 @@ Le GRT intégré au protocole est soumis à une période de décongélation et p Les indexeurs sélectionnent les subgraphs à indexer en fonction du signal de curation du subgraph, où les curateurs misent du GRT afin d'indiquer quels subgraphs sont de haute qualité et doivent être priorisés. Les consommateurs (par exemple les applications) peuvent également définir les paramètres pour lesquels les indexeurs traitent les requêtes pour leurs subgraphs et définir les préférences pour la tarification des frais de requête. -## Questions fréquemment posées +## FAQ -### Quelle est la mise minimale requise pour être indexeur sur le réseau ? +### What is the minimum stake required to be an Indexer on the network? -La mise minimale pour un indexeur est actuellement fixée à 100 000 GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### Quelles sont les sources de revenus pour un indexeur ? +### What are the revenue streams for an Indexer? -**Remises sur les frais de requête** : paiements pour le traitement des requêtes sur le réseau. Ces paiements sont acheminés via des canaux étatiques entre un indexeur et une passerelle. Chaque requête de requête provenant d'une passerelle contient un paiement et la réponse correspondante une preuve de validité du résultat de la requête. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. 
-**Récompenses d'indexation** : générées via une inflation annuelle de 3 % à l'échelle du protocole, les récompenses d'indexation sont distribuées aux indexeurs qui indexent les déploiements de subgraphs pour le réseau. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### Comment sont distribuées les récompenses d’indexation ? +### How are indexing rewards distributed? -Les récompenses de l'indexation proviennent de l'inflation du protocole qui est fixée à 3 % par an. Ils sont répartis entre les subraphs en fonction de la proportion de tous les signaux de curation sur chacun, puis distribués proportionnellement aux indexeurs en fonction de leur participation allouée sur ce subgraph. **Une allocation doit être clôturée avec une preuve d'indexation (POI) valide et répondant aux normes fixées par la charte d'arbitrage afin d'être éligible aux récompenses.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** -De nombreux outils ont été créés par la communauté pour calculer les récompenses ; vous trouverez une collection de ces outils organisés dans la [collection de guides communautaires](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). Vous pouvez également trouver une liste à jour des outils dans les canaux #Delegators et #Indexers sur le [serveur Discord](https://discord.gg/graphprotocol). Nous recommandons [un optimiseur d'allocation](https://github.com/graphprotocol/allocation-optimizer) intégré à la pile logicielle de l'Indexeur. +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### Qu'est-ce qu'une preuve d'indexation (POI) ? +### What is a proof of indexing (POI)? -Les POI sont utilisés dans le réseau pour vérifier qu'un indexeur indexe les subgraphs sur lesquels ils ont été alloués. Un POI pour le premier bloc de l'époque actuelle doit être soumis lors de la clôture d'une allocation pour que cette allocation soit éligible aux récompenses d'indexation. Un POI pour un bloc est un résumé de toutes les transactions du magasin d'entités pour un déploiement de subgraph spécifique jusqu'à ce bloc inclus. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### Quand les récompenses d’indexation sont-elles distribuées ? +### When are indexing rewards distributed? 
-Les allocations accumulent continuellement des récompenses pendant qu'elles sont actives et allouées dans un délai de 28 époques. Les récompenses sont collectées par les indexeurs et distribuées chaque fois que leurs allocations sont clôturées. Cela se produit soit manuellement, chaque fois que l'indexeur souhaite forcer leur fermeture, soit après 28 époques, un délégant peut fermer l'allocation pour l'indexeur, mais cela n'entraîne aucune récompense. 28 époques est la durée de vie maximale de l'allocation (à l'heure actuelle, une époque dure environ 24 heures). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### Les récompenses d’indexation en attente peuvent-elles être surveillées ? +### Can pending indexing rewards be monitored? -Le contrat RewardsManager a une fonction [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) en lecture seule qui peut être utilisée pour vérifier les récompenses en attente pour une allocation spécifique. +The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -De nombreux tableaux de bord créés par la communauté incluent des valeurs de récompenses en attente et ils peuvent être facilement vérifiés manuellement en suivant ces étapes : +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Interrogez le [réseau principal de subgraphs](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) pour obtenir les identifiants de toutes les allocations actives : +1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -56,139 +57,138 @@ query indexerAllocations { } ``` -Utilisez Etherscan pour appeler `getRewards()` : +Use Etherscan to call `getRewards()`: -- Naviguer vers [Interface d'étherscan pour le contrat de récompenses](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: + - Expand the **9. getRewards** dropdown. + - Enter the **allocationID** in the input. + - Click the **Query** button. -* Appeller `getRewards()`: - - Déroulez le menu **9. getRewards**. - - Saisissez le **allocationID** dans l'entrée. - - Cliquez sur le bouton **Requête**. +### What are disputes and where can I view them? -### Que sont les litiges et où puis-je les consulter ? +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. 
Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. -Les requêtes et les allocations de l'indexeur peuvent toutes deux être contestées sur The Graph pendant la période de contestation. Le délai de contestation varie selon le type de litige. Les requêtes/attestations ont une fenêtre de contestation de 7 époques, tandis que les allocations ont 56 époques. Passé ces délais, aucun litige ne peut être ouvert ni contre les attributions ni contre les requêtes. Lorsqu'un litige est ouvert, une caution d'un minimum de 10 000 GRT est exigée par les pêcheurs, qui sera verrouillée jusqu'à ce que le litige soit finalisé et qu'une résolution soit trouvée. Les pêcheurs sont tous les participants au réseau qui ouvrent des différends. +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. -Les différends sur **trois** enjeux possibles, tout comme la prudence des pêcheurs. +- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -- Si la contestation est rejetée, le GRT déposé par les Pêcheurs sera brûlé, et l'Indexeur contesté ne sera pas sabré. -- Si le différend est réglé par un match nul, la caution du pêcheur sera restituée et l'indexeur contesté ne sera pas réduit. -- Si la contestation est acceptée, le GRT déposé par les Pêcheurs sera restitué, l'Indexeur contesté sera réduit et les Pêcheurs gagneront 50% du GRT réduit. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -Les litiges peuvent être consultés dans l'interface utilisateur sur la page de profil d'un indexeur sous l'onglet `Différends`. +### What are query fee rebates and when are they distributed? -### Que sont les remises sur les frais de requête et quand sont-elles distribuées ? +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Les frais de requête sont collectés par la passerelle et distribués aux indexeurs selon la fonction de remise exponentielle (voir GIP [ici](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for -indexers/4162)). La fonction de remise exponentielle est proposée comme moyen de garantir que les indexeurs obtiennent le meilleur résultat en répondant fidèlement aux requêtes. 
Il fonctionne en incitant les indexeurs à allouer une part importante (qui peut être réduite en cas d'erreur lors de la réponse à une requête) par rapport au montant des frais de requête qu'ils peuvent percevoir. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -Une fois qu'une allocation a été clôturée, les remises peuvent être réclamées par l'indexeur. Lors de la réclamation, les remises sur les frais de requête sont distribuées à l'indexeur et à leurs délégués en fonction de la réduction des frais de requête et de la fonction de remise exponentielle. +### What is query fee cut and indexing reward cut? -### Qu'est-ce que la réduction des frais de requête et la réduction des récompenses d'indexation ? +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -Les valeurs `queryFeeCut` et `indexingRewardCut` sont des paramètres de délégation que l'indexeur peut définir avec cooldownBlocks pour contrôler la distribution du GRT entre l'indexeur et ses délégués. Consultez les dernières étapes dans [Jalonnement dans le protocole](/indexing/overview/#stake-in-the-protocol) pour obtenir des instructions sur la définition des paramètres de délégation. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **queryFeeCut** : % de remise sur les frais de requête qui sera distribuée à l'indexeur. Si ce taux est fixé à 95 %, l'indexeur recevra 95 % des frais de requête gagnés lorsqu'une allocation est clôturée, les 5 % restants étant reversés aux délégateurs. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -- **indexingRewardCut** - le % de récompenses d'indexation qui seront distribuées à l'indexeur. Si ce taux est fixé à 95 %, l'indexeur recevra 95 % des récompenses d'indexation lorsqu'une allocation est clôturée et les délégateurs se partageront les 5 % restants. +### How do Indexers know which subgraphs to index? -### Comment les indexeurs savent-ils quels subgraphs indexer ? +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -Les indexeurs peuvent se différencier en appliquant des techniques avancées pour prendre des décisions d'indexation de subgraphs, mais pour donner une idée générale, nous discuterons de plusieurs mesures clés utilisées pour évaluer les subgraphs du réseau : +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. 
-- **Signal de curation** : la proportion de signal de curation de réseau appliquée à un subgraph particulier est un bon indicateur de l'intérêt porté à ce subgraph, en particulier pendant la phase d'amorçage lorsque le volume des requêtes augmente. . +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **Frais de requête collectés** : les données historiques sur le volume des frais de requête collectés pour un subgraph spécifique sont un bon indicateur de la demande future. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **Montant mis en jeu** : surveiller le comportement des autres indexeurs ou examiner les proportions de la mise totale allouées à des subgraphs spécifiques peut permettre à un indexeur de surveiller l'offre pour les requêtes de subgraphs afin d'identifier les subgraphs qui le réseau fait preuve de confiance dans des subgraphs qui peuvent montrer un besoin d'approvisionnement accru. +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -- **Subgraphs sans récompenses d'indexation** : certains subgraphs ne génèrent pas de récompenses d'indexation principalement parce qu'ils utilisent des fonctionnalités non prises en charge comme IPFS ou parce qu'ils interrogent un autre réseau en dehors du réseau principal. Vous verrez un message sur un subgraph s'il ne génère pas de récompenses d'indexation. +### What are the hardware requirements? -### Quelle est la configuration matérielle requise ? +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -- **Petit** - Assez pour commencer à indexer plusieurs subgraphs, il faudra probablement l'étendre. -- **Standard** - Configuration par défaut, c'est ce qui est utilisé dans l'exemple de manifeste de déploiement k8s/terraform. -- **Moyen** - Indexeur de production prenant en charge 100 subgraphs et 200 à 500 requêtes par seconde. -- **Large** : Prêt à indexer tous les subgraphs actuellement utilisés et à répondre aux demandes pour le trafic associé. +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -| Installation | Postgres
(CPUs) | Postgres
(mémoire en Gbs) | Postgres
(disque en TB) | VMs
(CPUs) | VMs
(mémoire en Gbs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Petit | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 11 | 12 | 48 | -| Moyen | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | | +### What are some basic security precautions an Indexer should take? -### Quelles sont les précautions de sécurité de base qu’un indexeur doit prendre ? +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **Portefeuille d'opérateur** : la configuration d'un portefeuille d'opérateur est une précaution importante car elle permet à un indexeur de maintenir une séparation entre ses clés qui contrôlent la participation et celles qui contrôlent les opérations quotidiennes. opérations de jour. Voir [Participation dans le protocole](/indexing/overview/#stake-in-the-protocol) pour les instructions. - -- **Pare-feu** : seul le service Indexer doit être exposé publiquement et une attention particulière doit être accordée au verrouillage des ports d'administration et de l'accès à la base de données : le point de terminaison JSON-RPC de Graph Node (port par défaut : 8030), le point de terminaison de l'API de gestion de l'indexeur (port par défaut : 18000) et le point de terminaison de la base de données Postgres (port par défaut : 5432) ne doivent pas être exposés. +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. ## Infrastructure -Au centre de l'infrastructure d'un indexeur se trouve le nœud Graph qui surveille les réseaux indexés, extrait et charge les données selon une définition de subgraph et le sert d'[API GraphQL. ](/about/#how-the-graph-works). Le nœud graph doit être connecté à un point de terminaison exposant les données de chaque réseau indexé ; un nœud IPFS pour la recherche de données ; une base de données PostgreSQL pour son magasin ; et des composants d'indexeur qui facilitent ses interactions avec le réseau. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **Base de données PostgreSQL** - Le magasin principal du nœud graph, c'est là que les données du subgraph sont stockées. Le service et l'agent Indexeur utilisent également la base de données pour stocker les données du canal d'état, les modèles de coûts, les règles d'indexation et les actions d'allocation. +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. 
-- **Point de terminaison des données** : pour les réseaux compatibles EVM, Graph Node doit être connecté à un point de terminaison qui expose une API JSON-RPC compatible EVM. Cela peut prendre la forme d'un client unique ou d'une configuration plus complexe qui équilibre la charge sur plusieurs. Il est important de savoir que certains subgraphs nécessiteront des fonctionnalités client particulières telles que le mode archive et/ou l'API de traçage de parité.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **Nœud IPFS (version inférieure à 5)** : les métadonnées de déploiement de Subgraph sont stockées sur le réseau IPFS. Le nœud Graph accède principalement au nœud IPFS pendant le déploiement du subgraph pour récupérer le manifeste du sugraph et btous les fichiers liés. Les indexeurs de réseau n'ont pas besoin d'héberger leur propre nœud IPFS, un nœud IPFS pour le réseau est hébergé sur https://ipfs.network.thegraph.com.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node; an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

-- **Service d'indexation** : gère toutes les communications externes requises avec le réseau. Partage les modèles de coûts et les statuts d'indexation, transmet les demandes de requête des passerelles à un nœud graphlm et gère les paiements des requêtes via les canaux d'état avec la passerelle.
+- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **Agent indexeur** - Facilite les interactions des indexeurs sur la chaîne, y compris l'enregistrement sur le réseau, la gestion des déploiements de subgraphs sur son(ses) nœud(s) graph(s) et la gestion des allocations.
+- **Indexer agent** - Facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations.

-- **Serveur de métriques Prometheus** : les composants Graph Node et Indexer enregistrent leurs métriques sur le serveur de métriques.
+- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

-Remarque : Pour prendre en charge la mise à l'échelle agile, il est recommandé de séparer les problèmes de requête et d'indexation entre différents ensembles de nœuds : nœuds de requête et nœuds d'index.
+Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.

-### Aperçu des ports
+### Ports overview

-> **Important** : Soyez prudent lorsque vous exposez les ports publiquement : les **ports d'administration** doivent être maintenus verrouillés. Cela inclut les points de terminaison de gestion Graph Node JSON-RPC et Indexer détaillés ci-dessous. 
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.

#### Nœud de The Graph

-| Port | Objectif | Routes | Argument CLI | Variable d'environnement |
-| --- | --- | --- | --- | --- |
-| 8000 | Serveur HTTP GraphQL
(pour les requêtes de subgraphs) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(pour les abonnements aux subgraphs) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(pour gérer les déploiements) | / | --admin-port | - | -| 8030 | API de statut d'indexation des subgraphs | /graphq | --index-node-port | - | -| 8040 | Métriques Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Service d'indexation +#### Indexer Service -| Port | Objectif | Routes | Argument CLI | Variable d'environnement | -| --- | --- | --- | --- | --- | -| 7600 | Serveur HTTP GraphQL
(pour les requêtes payantes de subgraphs) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Métriques Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Agent indexeur +#### Indexer Agent -| Port | Objectif | Routes | Argument CLI | Variable d'environnement | -| ---- | ---------------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | API de gestion des indexeurs | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Configurer l'infrastructure du serveur à l'aide de Terraform sur Google Cloud +### Setup server infrastructure using Terraform on Google Cloud -> Note : Les indexeurs peuvent alternativement utiliser AWS, Microsoft Azure ou Alibaba. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Conditions préalables à l'installation +#### Install prerequisites - Google Cloud SDK -- Outil de ligne de commande kubectl +- Kubectl command line tool - Terraform -#### Créer un projet Google Cloud +#### Create a Google Cloud Project -- Cloner ou naviguez vers le [répertoire de l'Indexeur](https://github.com/graphprotocol/indexer). +- Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). -- Accédez au répertoire `./terraform`, c'est là que toutes les commandes doivent être exécutées. +- Navigate to the `./terraform` directory, this is where all commands should be executed. ```sh cd terraform ``` -- Authentifiez-vous auprès de Google Cloud et créez un nouveau projet. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Utilisez la page de facturation de Google Cloud Console pour activer la facturation du nouveau projet. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Créez une configuration Google Cloud. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Activez les API Google Cloud requises. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Créez un compte de service. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Activez le peering entre la base de données et le cluster Kubernetes qui sera créé à l'étape suivante. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,35 +249,35 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Créez un fichier de configuration Terraform minimal (mettez à jour si nécessaire). +- Create minimal terraform configuration file (update as needed). 
```sh indexer= cat > terraform.tfvars < **REMARQUE** : Toutes les variables de configuration d'exécution peuvent être appliquées soit en tant que paramètres à la commande au démarrage, soit en utilisant des variables d'environnement au format `COMPONENT_NAME_VARIABLE_NAME` (ex. `INDEXER_AGENT_ETHEREUM`). +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). #### Indexer agent ```sh graph-indexer-agent start \ - --ethereum \ + --ethereum \ --ethereum-network mainnet \ --mnemonic \ - --indexer-address \ + --indexer-address \ --graph-node-query-endpoint http://localhost:8000/ \ --graph-node-status-endpoint http://localhost:8030/graphql \ --graph-node-admin-endpoint http://localhost:8020/ \ --public-indexer-url http://localhost:7600/ \ - --indexer-geo-coordinates \ + --indexer-geo-coordinates \ --index-node-ids default \ --indexer-management-port 18000 \ --metrics-port 7040 \ @@ -481,8 +481,8 @@ graph-indexer-agent start \ --inject-dai true \ --postgres-host localhost \ --postgres-port 5432 \ - --postgres-username \ - --postgres-password \ + --postgres-username \ + --postgres-password \ --postgres-database indexer \ --allocation-management auto \ | pino-pretty @@ -494,21 +494,21 @@ graph-indexer-agent start \ SERVER_HOST=localhost \ SERVER_PORT=5432 \ SERVER_DB_NAME=is_staging \ -SERVER_DB_USER= \ -SERVER_DB_PASSWORD= \ +SERVER_DB_USER= \ +SERVER_DB_PASSWORD= \ graph-indexer-service start \ - --ethereum \ + --ethereum \ --ethereum-network mainnet \ --mnemonic \ - --indexer-address \ + --indexer-address \ --port 7600 \ --metrics-port 7300 \ --graph-node-query-endpoint http://localhost:8000/ \ --graph-node-status-endpoint http://localhost:8030/graphql \ --postgres-host localhost \ --postgres-port 5432 \ - --postgres-username \ - --postgres-password \ + --postgres-username \ + --postgres-password \ --postgres-database is_staging \ --network-subgraph-endpoint http://query-node-0:8000/subgraphs/id/QmUzRg2HHMpbgf6Q4VHKNDbtBEJnyp5JWCh2gUX9AV6jXv \ | pino-pretty @@ -516,56 +516,56 @@ graph-indexer-service start \ #### Indexer CLI -L'interface de ligne de commande de l'indexeur est un plugin pour [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible dans le terminal à `graph indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Gestion de l'indexeur à l'aide de l'indexeur CLI +#### Indexer management using Indexer CLI -L'outil suggéré pour interagir avec l'**API de gestion de l'indexeur** est la **CLI de l'indexeur**, une extension de l'**Graphique CLI**. L'agent indexeur a besoin de l'entrée d'un indexeur afin d'interagir de manière autonome avec le réseau au nom de l'indexeur. Le mécanisme permettant de définir le comportement de l'agent Indexeur est le mode de **gestion des allocations** et les **règles d'indexation**. En mode automatique, un indexeur peut utiliser des **règles d'indexation** pour appliquer sa stratégie spécifique de sélection des subgraphs à indexer et pour lesquels servir des requêtes. Les règles sont gérées via une API GraphQL servie par l'agent et connue sous le nom d'API de gestion de l'indexeur. 
En mode manuel, un indexeur peut créer des actions d'allocation à l'aide de la **file d'attente d'actions** et les approuver explicitement avant qu'elles ne soient exécutées. En mode surveillance, les **règles d'indexation** sont utilisées pour remplir la **file d'attente des actions** et nécessitent également une approbation explicite pour l'exécution.
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on behalf of the Indexer. The mechanisms for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution.

#### Usage

-La **Indexer CLI** se connecte à l'agent Indexer, généralement via la redirection de port, de sorte que la CLI n'a pas besoin de s'exécuter sur le même serveur ou cluster. Pour vous aider à démarrer et pour fournir un certain contexte, la CLI sera brièvement décrite ici.
+The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here.

-- La **Indexer CLI** se connecte à l'agent Indexer, généralement via la redirection de port, de sorte que la CLI n'a pas besoin de s'exécuter sur le même serveur ou cluster. Pour vous aider à démarrer et pour fournir un certain contexte, la CLI sera brièvement décrite ici
+- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`)

-Les règles de l'indexeur de graphs ` obtiennent [options] [ ...]` - Obtenez une ou plusieurs règles d'indexation en utilisant `all` comme `` pour obtenir toutes les règles, ou `global` pour obtenir les valeurs par défaut globales. Un argument supplémentaire `--merged` peut être utilisé pour spécifier que les règles spécifiques au déploiement sont fusionnées avec la règle globale. C'est ainsi qu'ils sont appliqués dans l'agent Indexeur.
+- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent.

-`ensemble de règles de l'indexeur de graphs [options] ...` - Définissez une ou plusieurs règles d'indexation.
+- `graph indexer rules set [options] ...` - Set one or more indexing rules.

-`graph indexer Rules start [options] ` - Commencez à indexer un déploiement de subgraph si disponible et définissez sa `decisionBasis` sur `toujours`, donc l'agent Indexeur choisira toujours de l'indexer. Si la règle globale est définie sur toujours, tous les sugraphs disponibles sur le réseau seront indexés. 
+- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed.

-`graph indexer Rules stop [options] ` - Arrêtez d'indexer un déploiement et définissez sa `decisionBasis` sur jamais, de sorte qu'il ignorera ce déploiement lors de la décision sur les déploiements à indice.
+- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index.

-`règles de l'indexeur graphique peut-être [options] ` — Définissez le `decisionBasis` pour un déploiement sur `rules`, afin que l'agent indexeur utilisez des règles d'indexation pour décider d'indexer ou non ce déploiement.
+- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment.

-`graph indexer actions get [options] ` - Récupérez une ou plusieurs actions en utilisant `all` ou laissez `action-id` vide pour obtenir toutes les actions. Un argument supplémentaire `--status` peut être utilisé pour afficher toutes les actions d'un certain statut.
+- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status.

-`file d'attente d'action de l'indexeur de graphs alloue ` - Action d'allocation de file d'attente
+- `graph indexer action queue allocate ` - Queue allocation action

-`réaffectation de la file d'attente des actions de l'indexeur de graphs ` - Action de réaffectation de la file d'attente
+- `graph indexer action queue reallocate ` - Queue reallocate action

-`file d'attente d'action de l'indexeur de graphs désallouer ` - Mettre en file d'attente l'action de désallocation
+- `graph indexer action queue unallocate ` - Queue unallocate action

-Les actions de l'indexeur de graphs ` annulent [ ...]` - Annuler toutes les actions dans la file d'attente si l'identifiant n'est pas spécifié, sinon annuler le tableau d'identifiants avec un espace comme séparateur
+- `graph indexer actions cancel [ ...]` - Cancel all actions in the queue if id is unspecified; otherwise cancel an array of ids, separated by spaces

-`les actions de l'indexeur de graphs approuvent [ ...]` - Approuver plusieurs actions à exécuter
+- `graph indexer actions approve [ ...]` - Approve multiple actions for execution

-`les actions de l'indexeur de graphs exécutent l'approbation` - Force le travailleur à exécuter immédiatement les actions approuvées
+- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately

-Toutes les commandes qui affichent les règles dans la sortie peuvent choisir entre les formats de sortie pris en charge (`table`, `yaml` et `json`) à l'aide du `- argument de sortie`.
+All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument.

-#### Règles d'indexation
+#### Indexing rules

-Les règles d'indexation peuvent être appliquées soit comme valeurs par défaut globales, soit pour des déploiements de subgraphs spécifiques à l'aide de leurs ID. 
Les champs `deployment` et `decisionBasis` sont obligatoires, tandis que tous les autres champs sont facultatifs. Lorsqu'une règle d'indexation a `rules` comme `decisionBasis`, l'agent indexeur comparera les valeurs de seuil non nulles sur cette règle avec les valeurs extraites du réseau pour le déploiement correspondant. Si le déploiement du subgraph a des valeurs supérieures (ou inférieures) à l'un des seuils, il sera choisi pour l'indexation. +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -Par exemple, si la règle globale a un `minStake` de **5** (GRT), tout déploiement de subgraph ayant plus de 5 (GRT) d'enjeu qui lui est alloué seront indexés. Les règles de seuil incluent `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake` et `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. -Modèle de données: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -Exemple d'utilisation de la règle d'indexation : +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### CLI de la file des actions +#### Actions queue CLI -L'indexer-cli fournit un module `actions` pour travailler manuellement avec le fichier d'attente d'actions. Il utilise l'**API Graphql** hébergée par le serveur de gestion de l'indexeur pour interagir avec le fichier d'attente des actions. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -Le travailleur d'exécution d'action ne récupérera les éléments de la file d'attente à exécuter que s'ils ont `ActionStatus = approved`. Dans le chemin recommandé, les actions sont ajoutées à la file d'attente avec ActionStatus = queued, elles doivent donc ensuite être approuvées afin d'être exécutées en chaîne. Le flux général ressemblera à : +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Action ajoutée à la file d'attente par l'outil d'optimisation tiers ou l'utilisateur indexeur-cli -- L'indexeur peut utiliser `indexer-cli` pour afficher toutes les actions en file d'attente -- L'indexeur (ou un autre logiciel) peut approuver ou annuler les actions dans la file d'attente à l'aide du `indexer-cli`. Les commandes d'approbation et d'annulation prennent un tableau d'identifiants d'action en entrée. 
-- L'agent d'exécution interroge régulièrement la file d'attente pour connaître les actions approuvées. Il récupérera les actions `approuvées` de la file d'attente, tentera de les exécuter et mettra à jour les valeurs dans la base de données en fonction de l'état d'exécution sur `succès` ou `échec`. -- Si une action réussit, le travailleur s'assurera qu'il existe une règle d'indexation qui indique à l'agent comment gérer l'allocation à l'avenir, utile lors de la réalisation d'actions manuelles lorsque l'agent est en mode `auto` ou `. mode surveillance`. -- L'indexeur peut surveiller la file d'attente des actions pour consulter un historique de l'exécution des actions et, si nécessaire, réapprouver et mettre à jour les éléments d'action en cas d'échec de leur exécution. La file d'attente des actions fournit un historique de toutes les actions mises en file d'attente et entreprises. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
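The same management API that `indexer-cli` talks to can also be scripted against directly, which is useful for building monitoring around the queue. The sketch below is a minimal, hypothetical TypeScript example: it assumes the agent's management endpoint is reachable at `http://localhost:18000` (the port used elsewhere in this guide) and that the schema exposes an `actions` query whose filter and fields mirror the `ActionInput` model shown below; verify both against the schema served by your indexer-agent version before relying on it.

```typescript
// Minimal sketch: list queued actions via the indexer management GraphQL API.
// The endpoint and the shape of the `actions` query are assumptions; adjust
// field names to match the schema served by your indexer-agent version.
const MANAGEMENT_API = "http://localhost:18000/";

interface QueuedAction {
  id: number;
  type: string;
  deploymentID: string;
  allocationID: string | null;
  amount: string | null;
  status: string;
}

async function fetchQueuedActions(): Promise<QueuedAction[]> {
  const query = `
    query {
      actions(filter: { status: queued }) {
        id
        type
        deploymentID
        allocationID
        amount
        status
      }
    }`;

  const res = await fetch(MANAGEMENT_API, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  return data.actions;
}

// Print the queue, e.g. before deciding which action ids to approve.
fetchQueuedActions().then((actions) => console.table(actions));
```

Approval itself would still go through `graph indexer actions approve <id>` as described above; this kind of read-only polling simply makes it easier to review the queue before approving.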
-Modèle de données: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Exemple d'utilisation à partir de la source : +Example usage from source: ```bash graph indexer actions get all @@ -677,58 +677,57 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Notez que les types d'actions pris en charge pour la gestion des allocations ont des exigences d'entrée différentes : +Note that supported action types for allocation management have different input requirements: -- `Allocate` - allouer une participation à un déploiement de subgraph spécifique +- `Allocate` - allocate stake to a specific subgraph deployment - - paramètres d'action requis : + - required action params: - deploymentID - amount -- `Annuler l'allocation` - clôturer l'allocation, libérant la mise pour la réaffecter ailleurs +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - paramètres d'action requis : - - ID d'allocation + - required action params: + - allocationID - deploymentID - - paramètres d'action facultatifs : + - optional action params: - poi - - force (force l'utilisation du POI fourni même s'il ne correspond pas à ce que fournit le nœud graph) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Réallouer` - fermer atomiquement l'allocation et ouvrir une nouvelle allocation pour le même déploiement de subgraph +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - paramètres d'action requis : - - ID d'allocation + - required action params: + - allocationID - deploymentID - amount - - paramètres d'action facultatifs : + - optional action params: - poi - - force (force l'utilisation du POI fourni même s'il ne correspond pas à ce que fournit le nœud graph) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Modèles de coûts +#### Cost models -Les modèles de coûts fournissent une tarification dynamique pour les requêtes en fonction des attributs du marché et des requêtes. Le service Indexeur partage un modèle de coût avec les passerelles pour chaque subgraph pour lequel elles ont l'intention de répondre aux requêtes. Les passerelles, à leur tour, utilisent le modèle de coût pour prendre des décisions de sélection des indexeurs par requête et pour négocier le paiement avec les indexeurs choisis. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Le langage Agora fournit un format flexible pour déclarer des modèles de coûts pour les requêtes. Un modèle de prix Agora est une séquence d'instructions qui s'exécutent dans l'ordre pour chaque requête de niveau supérieur dans une requête GraphQL. Pour chaque requête de niveau supérieur, la première instruction qui y correspond détermine le prix de cette requête. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. 
-Une instruction est composée d'un prédicat, qui est utilisé pour faire correspondre les requêtes GraphQL, et d'une expression de coût qui, une fois évaluée, génère un coût en GRT décimal. Les valeurs dans la position d'argument nommé d'une requête peuvent être capturées dans le prédicat et utilisées dans l'expression. Des éléments globaux peuvent également être définis et remplacés par des espaces réservés dans une expression. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Exemple de modèle de coût : +Example cost model: ``` -# Cette instruction capture la valeur de saut, -# utiliser une expression booléenne dans le prédicat pour correspondre aux requêtes spécifiques qui utilisent `skip` -# et une expression de coût pour calculer le coût en fonction de la valeur `skip` et du SYSTEM_LOAD global +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; - -# Cette valeur par défaut correspondra à n'importe quelle expression GraphQL. -# Il utilise un Global substitué dans l'expression pour calculer le coût +# This default will match any GraphQL expression. +# It uses a Global substituted into the expression to calculate cost default => 0.1 * $SYSTEM_LOAD; ``` -Exemple de calcul des coûts de requête utilisant le modèle ci-dessus : +Example query costing using the above model: | Query | Price | | ---------------------------------------------------------------------------- | ------- | @@ -736,83 +735,83 @@ Exemple de calcul des coûts de requête utilisant le modèle ci-dessus : | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Application du modèle de coût +#### Applying the cost model -Les modèles de coûts sont appliqués via la CLI Indexer, qui les transmet à l'API de gestion de l'indexeur de l'agent Indexer pour les stocker dans la base de données. Le service d'indexation les récupérera ensuite et fournira les modèles de coûts aux passerelles chaque fois qu'elles les demanderont. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interagir avec le réseau +## Interacting with the network -### Enjeu dans le protocole +### Stake in the protocol -Les premières étapes pour participer au réseau en tant qu'Indexeur sont d'approuver le protocole, de staker des fonds et (facultativement) de configurer une adresse opérateur pour les interactions quotidiennes avec le protocole. +The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. 
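The Remix walkthrough below covers these two contract calls step by step. For operators who prefer scripting, the following is a minimal, illustrative ethers v6 sketch rather than an official tool: the contract addresses are the mainnet GraphToken and Staking addresses used in the steps that follow, the ABI fragments assume the standard ERC-20 `approve` and a `stake(uint256)` entry point on the Staking contract, and the RPC URL, private key, and amount are placeholders you must supply and verify against the published ABIs.

```typescript
// Minimal sketch of the approve + stake flow using ethers v6.
// Addresses are the mainnet GraphToken and Staking contracts referenced in
// the steps below; RPC_URL and PRIVATE_KEY are placeholders to fill in.
import { Contract, JsonRpcProvider, Wallet, parseUnits } from "ethers";

const GRAPH_TOKEN = "0xc944E90C64B2c07662A292be6244BDf05Cda44a7";
const STAKING = "0xF55041E37E12cD407ad00CE2910B8269B01263b9";

// ABI fragments: standard ERC-20 approve, and stake(uint256) as assumed
// from the Staking contract; verify against the published ABI before use.
const tokenAbi = ["function approve(address spender, uint256 amount) returns (bool)"];
const stakingAbi = ["function stake(uint256 tokens)"];

async function approveAndStake(rpcUrl: string, privateKey: string, grt: string) {
  const signer = new Wallet(privateKey, new JsonRpcProvider(rpcUrl));
  const amount = parseUnits(grt, 18); // GRT has 18 decimals

  // Approve the Staking contract to spend the tokens, then stake them.
  const token = new Contract(GRAPH_TOKEN, tokenAbi, signer);
  await (await token.approve(STAKING, amount)).wait();

  const staking = new Contract(STAKING, stakingAbi, signer);
  await (await staking.stake(amount)).wait();
}

// Example: stake 100K GRT, the current minimum stake on mainnet.
approveAndStake("RPC_URL", "PRIVATE_KEY", "100000").catch(console.error);
```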
-> Note : Pour les besoins de ces instructions, Remix sera utilisé pour l'interaction avec le contrat, mais n'hésitez pas à utiliser l'outil de votre choix ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), et [MyCrypto](https://www.mycrypto.com/account) sont quelques autres outils connus). +> Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -Une fois qu'un indexeur a staké des GRT dans le protocole, les [composants de l'indexeur](/indexing/overview/#indexer-components) peuvent être démarrés et commencer leurs interactions avec le réseau. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Approuver les jetons +#### Approve tokens -1. Ouvrez l'[application Remix](https://remix.ethereum.org/) dans un navigateur +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. Dans `File Explorer`, créez un fichier nommé **GraphToken.abi** avec l'[ABI du jeton](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). -3. Avec `GraphToken.abi` sélectionné et ouvert dans l'éditeur, passez à la section `Déployer et exécuter des transactions` dans l’interface de Remix. +3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. Sous Environnement, sélectionnez `Injected Web3` et sous `Compte` sélectionnez votre adresse d'indexeur. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Définissez l'adresse du contrat GraphToken : collez l'adresse du contrat GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) à côté de `À l'adresse` et cliquez sur le bouton `À l'adresse` pour appliquer. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Appelez la fonction `approve(spender, montant)` pour approuver le contrat de Staking. Remplissez `spender` avec l'adresse du contrat de Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) et `montant` avec les jetons à miser (en wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Jetons de mise +#### Stake tokens -1. Ouvrez l'[application Remix](https://remix.ethereum.org/) dans un navigateur +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. Dans l'`Explorateur de fichiers`, créez un fichier nommé **Staking.abi** avec l'ABI de staking. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. -3. Avec `Staking.abi` sélectionné et ouvert dans l'éditeur, passez à la section `Déployer et exécuter des transactions` dans l’interface de Remix. +3. 
With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface.

-4. Sous Environnement, sélectionnez `Injected Web3` et sous `Compte` sélectionnez votre adresse d'indexeur.
+4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

-5. Définissez l'adresse du contrat de jalonnement - Collez l'adresse du contrat de jalonnement (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) à côté de `À l'adresse` et cliquez sur le bouton `À l'adresse` pour appliquer.
+5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply.

-6. Appelez `stake()` pour implanter GRT dans le protocole.
+6. Call `stake()` to stake GRT in the protocol.

-7. (Facultatif) Les indexeurs peuvent approuver une autre adresse pour être l'opérateur de leur infrastructure d'indexeur afin de séparer les clés qui contrôlent les fonds de celles qui effectuent des actions quotidiennes telles que l'allocation sur des sugraphes et la réponse à des requêtes (payantes). Afin de définir l'opérateur, appelez `setOperator()` avec l'adresse de l'opérateur.
+7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day-to-day actions such as allocating on subgraphs and serving (paid) queries. To set the operator, call `setOperator()` with the operator address.

-8. (Facultatif) Afin de contrôler la distribution des récompenses et d'attirer stratégiquement les délégués, les indexeurs peuvent mettre à jour leurs paramètres de délégation en mettant à jour leur indexingRewardCut (parties par million), queryFeeCut (parties par million) et cooldownBlocks (nombre de blocs). Pour ce faire, appelez `setDelegationParameters()`. L'exemple suivant définit queryFeeCut pour distribuer 95 % des remises sur les requêtes à l'indexeur et 5 % aux délégués, définit indexingRewardCut pour distribuer 60 % des récompenses d'indexation à l'indexeur et 40 % aux délégués, et définit `thecooldownBlocks`. période à 500 blocs.
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

### Définition des paramètres de délégation

-La fonction `setDelegationParameters()` dans le [contrat de staking](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol) est essentielle pour les Indexeurs, leur permettant de définir des paramètres qui définissent leurs interactions avec les Délégateurs, influençant leur partage des récompenses et leur capacité de délégation. 
+The `setDelegationParameters()` function in the [staking contract](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol) is essential for Indexers, allowing them to set parameters that define their interactions with Delegators, influencing their reward sharing and delegation capacity. -### Comment définir les paramètres de délégation +### How to set delegation parameters -Pour définir les paramètres de délégation à l'aide de l'interface Graph Explorer, suivez ces étapes : +To set the delegation parameters using Graph Explorer interface, follow these steps: -1. Naviguez vers [Graph Explorer](https://thegraph.com/explorer/). -2. Connectez votre portefeuille. Choisissez multisig (comme Gnosis Safe) puis sélectionnez mainnet. Note : Vous devrez répéter ce processus pour Arbitrum One. -3. Connectez le portefeuille que vous avez en tant que signataire. -4. Accédez à la section "Settings" puis sélectionnez "Delegation Parameters". Ces paramètres doivent être configurés afin d’obtenir un taux effectif dans la fourchette souhaitée. Une fois les valeurs saisies dans les champs prévus, l’interface calcule automatiquement ce taux effectif. Ajustez les valeurs selon vos besoins pour atteindre le pourcentage effectif désiré. -5. Soumettez la transaction au réseau. +1. Navigate to [Graph Explorer](https://thegraph.com/explorer/). +2. Connect your wallet. Choose multisig (such as Gnosis Safe) and then select mainnet. Note: You will need to repeat this process for Arbitrum One. +3. Connect the wallet you have as a signer. +4. Navigate to the 'Settings' section and select 'Delegation Parameters'. These parameters should be configured to achieve an effective cut within the desired range. Upon entering values in the provided input fields, the interface will automatically calculate the effective cut. Adjust these values as necessary to attain the desired effective cut percentage. +5. Submit the transaction to the network. -> Note : Cette transaction devra être confirmée par les signataires du portefeuille multisig. +> Note: This transaction will need to be confirmed by the multisig wallet signers. -### La durée de vie d'une allocation +### The life of an allocation After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Une fois qu'une allocation est créée on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) elle est considérée comme **active**. Une partie du staking de l'Indexeur et/ou du staking délégué est allouée à un déploiement de subgraph, ce qui leur permet de réclamer des récompenses d'indexation et de servir des requêtes pour ce déploiement de subgraph. L'agent Indexeur gère la création des allocations en fonction des règles de l'Indexeur. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. 
-- **Closed** - Un Indexeur est libre de fermer une allocation une fois qu'une époque est passée ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) ou son agent Indexeur fermera automatiquement l'allocation après le **maxAllocationEpochs** (actuellement 28 jours). Lorsqu'une allocation est fermée avec une preuve d'indexation (POI) valide, leurs récompenses d'indexation sont distribuées à l'Indexeur et à ses Délégateurs ([en savoir plus](/indexing/overview/#how-are-indexing-rewards-distributed)). +- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Il est recommandé aux indexeurs d'utiliser la fonctionnalité de synchronisation hors chaîne pour synchroniser les déploiements de subgraphs avec Chainhead avant de créer l'allocation en chaîne. Cette fonctionnalité est particulièrement utile pour les sous-graphes dont la synchronisation peut prendre plus de 28 époques ou qui risquent d'échouer de manière indéterministe. +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 3dc6c5c4f60627b59892fab75282ecd32a7d3fdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:34 -0500 Subject: [PATCH 0071/1534] New translations overview.mdx (Spanish) --- website/src/pages/es/indexing/overview.mdx | 426 ++++++++++----------- 1 file changed, 213 insertions(+), 213 deletions(-) diff --git a/website/src/pages/es/indexing/overview.mdx b/website/src/pages/es/indexing/overview.mdx index 5b3e8b67d586..abe70137727b 100644 --- a/website/src/pages/es/indexing/overview.mdx +++ b/website/src/pages/es/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexación +title: Indexing Overview +sidebarTitle: Descripción --- Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. @@ -8,37 +9,37 @@ Los GRT que se depositan en stake en el protocolo está sujeto a un periodo de d Los Indexadores seleccionan subgrafos para indexar basados en la señal de curación del subgrafo, donde los Curadores realizan stake de sus GRT para indicar qué subgrafos son de mejor calidad y deben tener prioridad para ser indexados. Los consumidores (por ejemplo, aplicaciones, clientes) también pueden establecer parámetros para los cuales los Indexadores procesan consultas para sus subgrafos y establecen preferencias para el precio asignado a cada consulta. -## Preguntas frecuentes +## FAQ -### ¿Cuál es el stake mínimo necesario para ser Indexador en la red? +### What is the minimum stake required to be an Indexer on the network? 
-El stake mínimo para un Indexador es actualmente de 100.000 GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### ¿Cuáles son las fuentes de ingresos de un Indexador? +### What are the revenue streams for an Indexer? -**Reembolsos de Tarifas de consulta** - Pagos por servir consultas en la red. Estos pagos se realizan a través de canales de estado entre un Indexador y un gateway. Cada solicitud de consulta de un gateway contiene un pago y la respuesta correspondiente una prueba de la validez del resultado de la consulta. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Recompensas de indexación** - Generadas a través de una inflación anual del 3% en todo el protocolo, las recompensas de indexación se distribuyen a los Indexadores que indexan deploys de subgrafos para la red. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### ¿Cómo se distribuyen las recompensas de indexación? +### How are indexing rewards distributed? -Las recompensas de indexación proceden de la inflación del protocolo, que se fija en un 3% anual de emisión. Se distribuyen entre los subgrafos en función de la proporción de todas las señales de curación en cada uno de ellos y, a luego, se distribuyen proporcionalmente a los Indexadores en función de su allocated stake en ese subgrafo. **Una allocation debe cerrarse con una prueba válida de indexación (POI) que cumpla las normas establecidas por el acta de arbitraje para poder optar a las recompensas.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### ¿Qué es una prueba de indexación (POI)? +### What is a proof of indexing (POI)? -Las POIs se utilizan en la red para verificar que un Indexador está indexando los subgrafos en los que tiene allocation. Se debe enviar un POI para el primer bloque de la época actual al cerrar una allocation para que dicha allocation pueda optar a las recompensas de indexación. Un POI para un bloque es un resumen de todas las transacciones de las entidades involucradaas en el deployment de un subgrafo específico hasta ese bloque inclusive. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. 
A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### ¿Cuándo se distribuyen las recompensas de indexación? +### When are indexing rewards distributed? -Las allocations acumulan recompensas continuamente mientras están activas y asignadas dentro de 28 épocas. Los Indexadores recogen las recompensas y las distribuyen cuando se cierran sus allocations. Esto ocurre manualmente, siempre que el Indexador quiera forzar el cierre, o después de 28 épocas un Delegador puede cerrar la allocation para el Indexador, pero esto da como resultado que no se generen recompensas. 28 épocas es la duración máxima de la allocation (ahora mismo, una época dura unas 24 horas). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### ¿Se pueden monitorear las recompensas de indexación pendientes? +### Can pending indexing rewards be monitored? The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -Muchos de los paneles creados por la comunidad incluyen valores de recompensas pendientes y se pueden verificar fácilmente de forma manual siguiendo estos pasos: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: @@ -56,129 +57,128 @@ query indexerAllocations { } ``` -Utiliza Etherscan para solicitar el `getRewards()`: +Use Etherscan to call `getRewards()`: -- Navega hacia [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* Para llamar `getRewards()`: +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - - Introduce el **allocationID** en la entrada. - - Haz clic en el botón **Query**. + - Enter the **allocationID** in the input. + - Click the **Query** button. -### ¿Qué son las disputas y dónde puedo verlas? +### What are disputes and where can I view them? -Las consultas y allocation del Indexador se pueden disputar en The Graph durante el período de disputa. El período de disputa varía según el tipo de disputa. Las consultas tienen una ventana de disputa de 7 épocas, mientras que las allocation tienen 56 épocas. Una vez transcurridos estos períodos, no se pueden abrir disputas contra allocation o consultas. Cuando se abre una disputa, los Fishermen requieren un depósito mínimo de 10,000 GRT, que permanecerá bloqueado hasta que finalice la disputa y se haya dado una resolución. 
Los Fishermen (o pescadores) son todos los participantes de la red que abren disputas.
+Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have a 7-epoch dispute window, whereas allocations have a 56-epoch window. After these periods pass, disputes cannot be opened against either allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required from the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants who open disputes.

-Las disputas tienen **tres** posibles resultados, al igual que el depósito de los Fishermen.
+Disputes have **three** possible outcomes, as does the deposit of the Fishermen.

-- Si se rechaza la disputa, los GRT depositados por los Fishermen se quemarán y el Indexador en disputa no incurrirá en slashing.
-- Si la disputa se resuelve como empate, se devolverá el depósito de los Fishermen y no se realizará slashing al Indexador en disputa.
-- Si la disputa es aceptada, los GRT depositados por los Fishermen serán devueltos, el Indexador en disputa recibirá slashing y los Fishermen ganarán el 50% de los GRT en slashing.

+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed.
+- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed, and the Fishermen will earn 50% of the slashed GRT.

-Las disputas se podran visualizar en la interfaz correspondiente al perfil del Indexador en la pestaña de `Disputes`.
+Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab.

-### ¿Qué son los reembolsos de tarifas de consulta y cuándo se distribuyen?
+### What are query fee rebates and when are they distributed?

Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect.

Once an allocation has been closed, the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function.

-### ¿Qué es el recorte de la tarifa de consulta y el recorte de la recompensa de indexación?
+### What is query fee cut and indexing reward cut?

-Los valores de los `QueryFeeCut` e `IndexingRewardCut` son parámetros de delegación que el Indexador debe establecer junto con cooldownBlocks para controlar la distribución de GRT entre el Indexador y sus Delegadores. Hecha un vistazo de los últimos pasos de [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) para obtener instrucciones sobre la configuración de los parámetros de delegación.
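Before the parameter definitions that follow, a small worked example may help make the split arithmetic concrete. This is an illustrative sketch only - the onchain contracts perform the real accounting, and all numbers here are hypothetical:

```typescript
// Illustrative only: how query fee rebates for a closed allocation are
// split between an Indexer and its Delegators, given a queryFeeCut
// expressed in parts per million (PPM).
const PPM = 1_000_000;

function splitRebates(totalRebateGRT: number, queryFeeCutPPM: number) {
  const indexer = (totalRebateGRT * queryFeeCutPPM) / PPM;
  return { indexer, delegators: totalRebateGRT - indexer };
}

// A 95% queryFeeCut (950,000 PPM) applied to a 200 GRT rebate:
console.log(splitRebates(200, 950_000)); // { indexer: 190, delegators: 10 }
```

The same proportional split applies to indexing rewards via `indexingRewardCut`, as described next.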
+The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters.

- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed, with the other 5% going to the Delegators.

- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%.

-### ¿Cómo saben los Indexadores qué subgrafos indexar?
+### How do Indexers know which subgraphs to index?

-Los indexadores pueden diferenciarse aplicando técnicas avanzadas para tomar decisiones de indexación de subgrafos, pero para dar una idea general, discutiremos varias métricas clave que se utilizan para evaluar subgrafos en la red:
+Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions, but to give a general idea, we'll discuss several key metrics used to evaluate subgraphs in the network (a sketch combining them follows this list):

-- **Señal de Curación** - la proporción de señal de curación de la red aplicada a un subgrafo en particular es un buen indicador del interés en ese subgrafo, especialmente durante la fase de lanzamiento cuando el volumen de consultas aumenta.
+- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query volume is ramping up.

-- **Tarifas de consulta recogidas** - Los datos históricos del volumen de tarifas de consulta recogidas para un subgrafo específico son un buen indicador de la demanda futura.
+- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand.

-- **Cantidad en stake** - Monitorear el comportamiento de otros Indexadores u observar las proporciones de stake total asignado a subgrafos específicos puede permitir a un Indexador monitorear el lado de la oferta de consultas de subgrafos para identificar subgrafos en los que la red está mostrando confianza o subgrafos que pueden mostrar una necesidad de más oferta.
+- **Amount staked** - Monitoring the behavior of other Indexers, or looking at proportions of total stake allocated towards specific subgraphs, can allow an Indexer to monitor the supply side for subgraph queries and identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply.

-- **Subgrafos sin recompensas de indexación** - Algunos subgrafos no generan recompensas de indexación principalmente porque utilizan funciones no compatibles como IPFS o porque están consultando otra red fuera de mainnet. Verás un mensaje en un subgrafo si no genera recompensas de indexación.
+- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards, mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards.

-### ¿Cuáles son los requisitos de hardware?
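As a loose illustration of how these metrics might be combined, the sketch below scores deployments with made-up weights. The field names are hypothetical, and the weighting is purely illustrative - real strategies (and tools like the allocation optimizer linked above) are far more sophisticated:

```typescript
// Hypothetical scoring of subgraph deployments using the metrics above.
// Weights, fields, and thresholds are illustrative, not a recommended strategy.
interface DeploymentMetrics {
  signalledTokens: number;  // curation signal on the deployment (GRT)
  queryFeesAmount: number;  // historical query fees collected (GRT)
  stakedTokens: number;     // stake other Indexers have already allocated (GRT)
  rewardsEligible: boolean; // false for deployments with no indexing rewards
}

function score(m: DeploymentMetrics): number {
  if (!m.rewardsEligible) return 0; // skip subgraphs with no indexing rewards
  const demand = 0.6 * m.signalledTokens + 0.4 * m.queryFeesAmount;
  const competition = 1 + m.stakedTokens; // crowded deployments score lower
  return demand / competition;
}
```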
+### What are the hardware requirements? -- **Pequeño**: Lo suficiente como para comenzar a indexar varios subgrafos, es probable que deba expandirse. -- **Estándar**: Configuración predeterminada, esto es lo que se usa en los manifiestos de deploy de k8s/terraform de ejemplo. -- **Medio** - Indexador de producción que soporta 100 subgrafos y 200-500 solicitudes por segundo. -- **Grande**: Preparado para indexar todos los subgrafos utilizados actualmente y atender solicitudes para el tráfico relacionado. +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Configuración | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Pequeño | 4 | 8 | 1 | 4 | 16 | -| Estándar | 8 | 30 | 1 | 12 | 48 | -| Medio | 16 | 64 | 2 | 32 | 64 | -| Grande | 72 | 468 | 3,5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -### ¿Qué precauciones básicas de seguridad debe tomar un Indexador? +### What are some basic security precautions an Indexer should take? -- **Wallet del operador** - Configurar una wallet del operador es una precaución importante porque permite que un Indexador mantenga la separación entre sus claves que controlan el stake y las que controlan las operaciones diarias. Consulta [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) para obtener instrucciones. +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **Firewall**: Solo el servicio de indexación debe exponerse públicamente y se debe prestar especial atención al bloqueo de los puertos de administración y el acceso a la base de datos: el endpoint JSON-RPC de Graph Node (puerto predeterminado: 8030), el endpoint de la API de administración del Indexador (puerto predeterminado: 18000) y el endpoint de la base de datos de Postgres (puerto predeterminado: 5432) no deben exponerse. +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## Infraestructura +## Infrastructure -En el centro de la infraestructura de un Indexador está el Graph Node que monitorea las redes que fueron indexadas, extrae y carga datos según una definición de un subgrafo y lo sirve como una [GraphQL API](/about/#how-the-graph-works). El Graph Node debe estar conectado a un endpoint que exponga datos de cada red indexada; un nodo IPFS para obtener datos; una base de datos PostgreSQL para su almacenamiento; y componentes del Indexador que facilitan sus interacciones con la red. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **Base de datos PostgreSQL**: El almacén principal para Graph Node, aquí es donde se almacenan los datos del subgrafo. El servicio y el agente del indexador también utilizan la base de datos para almacenar datos del canal de estado, modelos de costos, reglas de indexación y acciones de allocation. +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - Para redes compatibles con EVM, Graph Node necesita estar conectado a un endpoint que exponga una API de JSON-RPC compatible con EVM. 
Esto puede tomar la forma de un solo cliente o podría ser una configuración más compleja que balancea la carga en múltiples clientes. Es importante tener en cuenta que ciertos subgrafos requerirán capacidades específicas del cliente, como el modo de archivo y / o la API de trazado de paridad.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client, or it could be a more complex setup that load balances across multiple clients. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **Nodo IPFS (versión inferior a 5)**: Los metadatos de deploy de Subgrafo se almacenan en la red IPFS. El Graph Node accede principalmente al nodo IPFS durante el deploy del subgrafo para obtener el manifiesto del subgrafo y todos los archivos vinculados. Los Indexadores de la red no necesitan alojar su propio nodo IPFS, un nodo IPFS para la red está alojado en https://ipfs.network.thegraph.com.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node; an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

-- **Servicio de Indexador**: Gestiona todas las comunicaciones externas necesarias con la red. Comparte modelos de costos y estados de indexación, transfiere solicitudes de consulta desde el gateway a Graph Node y administra los pagos de consultas a través de canales de estado con la gateway.
+- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **Agente Indexador**: Facilita las interacciones de los Indexadores on-chain, incluido el registro en la red, la gestión de deploy de subgrafos en sus Graph Node y la gestión de allocations.
+- **Indexer agent** - Facilitates the Indexer's onchain interactions, including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.

-- **Servidor de métricas de Prometheus** - Los componentes Graph Node y el Indexer registran sus métricas en el servidor de métricas.
+- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

-Nota: Para admitir el escalado ágil, se recomienda que las inquietudes de consulta e indexación se separen entre diferentes conjuntos de nodos: nodos de consulta y nodos de índice.
+Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.

-### Vista general de puertos
+### Ports overview

-> **Importante**: Ten cuidado con la exposición de los puertos públicamente; los **puertos de administración** deben mantenerse bloqueados. Esto incluye el Graph Node JSON-RPC y los endpoints de administración del Indexador que se detallan a continuación.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.
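Before the port tables below, here is a quick sketch of probing Graph Node's indexing status API (port 8030 by default) to confirm a node is up and serving statuses. The query shape is the commonly documented one, but the exact fields can vary by graph-node version, so treat it as indicative:

```typescript
// Minimal health probe against Graph Node's indexing status API.
// Assumes graph-node is reachable on its default status port, 8030.
async function checkIndexingStatuses(host = "http://localhost:8030") {
  const res = await fetch(`${host}/graphql`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      query: "{ indexingStatuses { subgraph synced health } }",
    }),
  });
  const { data } = await res.json();
  return data.indexingStatuses;
}

checkIndexingStatuses().then(console.log).catch(console.error);
```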
#### Graph Node -| Puerto | Objeto | Rutas | Argumento CLI | Variable de Entorno | -| --- | --- | --- | --- | --- | -| 8000 | Servidor HTTP GraphQL
(para consultas de subgrafos) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(para suscripciones a subgrafos) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(para administrar implementaciones) | / | --admin-port | - | -| 8030 | API de estado de indexación de subgrafos | /graphql | --index-node-port | - | -| 8040 | Métricas de Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Servicio de Indexador +#### Indexer Service -| Puerto | Objeto | Rutas | Argumento CLI | Variable de Entorno | -| --- | --- | --- | --- | --- | -| 7600 | Servidor HTTP GraphQL
(para consultas de subgrafo pagadas) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Métricas de Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Agente Indexador +#### Indexer Agent -| Puerto | Objeto | Rutas | Argumento CLI | Variable de Entorno | -| ------ | ----------------------------- | ----- | ------------------------- | --------------------------------------- | -| 8000 | API de gestión de indexadores | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Configurar la infraestructura del servidor con Terraform en Google Cloud +### Setup server infrastructure using Terraform on Google Cloud -> Nota: Los Indexadores pueden utilizar alternativamente AWS, Microsoft Azure o Alibaba. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Instalar requisitos previos +#### Install prerequisites -- SDK de Google Cloud -- Herramienta de línea de comandos de Kubectl +- Google Cloud SDK +- Kubectl command line tool - Terraform -#### Crear un proyecto de Google Cloud +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Nota: Para admitir el escalado ágil, se recomienda que las inquietudes de consu cd terraform ``` -- Autentícate con Google Cloud y crea un nuevo proyecto. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Usa la página de facturación de Google Cloud Console para habilitar la facturación del nuevo proyecto. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Crea una configuración de Google Cloud. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Habilita las API requeridas de Google Cloud. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Crea una cuenta de servicio. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Habilita el emparejamiento entre la base de datos y el cluster de Kubernetes que se creará en el siguiente paso. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,22 +249,22 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Crea un archivo de configuración mínimo de terraform (actualiza según sea necesario). +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < **NOTA**: Todas las variables de configuración de runtime se pueden aplicar como parámetros al comando en el inicio o usando variables de entorno con el formato `COMPONENT_NAME_VARIABLE_NAME`(ej. `INDEXER_AGENT_ETHEREUM`). 
+> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). -#### Agente Indexador +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Servicio de Indexador +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -514,58 +514,58 @@ graph-indexer-service start \ | pino-pretty ``` -#### CLI del Indexador +#### Indexer CLI -Indexer CLI es un complemento para [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accesible en la terminal de `graph indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Gestión del Indexador mediante Indexer CLI +#### Indexer management using Indexer CLI -La herramienta sugerida para interactuar con la **API de gestión de Indexadores** es la **CLI de Indexadores**, una extensión de la **Graph CLI**. El agente del Indexador necesita información de un Indexador para interactuar de forma autónoma con la red en nombre del Indexador. El mecanismo para definir el comportamiento del agente Indexador son el modo de **gestión de allocation** y las **reglas de indexación**. En modo automático, un Indexador puede utilizar **reglas de indexación** para aplicar su estrategia específica de selección de subgrafos para indexar y servir consultas. Las reglas se gestionan a través de una API GraphQL servida por el agente y conocida como API de gestión de Indexadores. En modo manual, un Indexador puede crear acciones de allocation utilizando las **acciones en fila** y aprobarlas explícitamente antes de que se ejecuten. En el modo de supervisión, las **reglas de indexación** se utilizan para rellenar las **acciones en fila** y también requieren aprobación explícita para su ejecución. +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Uso +#### Usage -La **CLI del Indexador** se conecta al agente Indexador, normalmente a través del reenvío de puertos, por lo que no es necesario que CLI se ejecute en el mismo servidor o clúster. Para ayudarte a comenzar y proporcionar algo de contexto, la CLI se describirá brevemente aquí. +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. 
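The commands below wrap a GraphQL API served by the Indexer agent. As a rough sketch, once the port-forward is up you can also talk to that API directly; the query assumes an `indexingRules` field like the one exposed by recent indexer-agent versions, so check the schema your agent actually serves:

```typescript
// Rough sketch: query the Indexer Management API over the same
// port-forward the CLI uses. Field names are indicative only.
async function getIndexingRules(endpoint = "http://localhost:18000") {
  const res = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      query: "{ indexingRules(merged: true) { identifier decisionBasis } }",
    }),
  });
  return (await res.json()).data;
}
```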
-- `graph indexer connect ` - Conéctate a la API de administración del Indexador. Normalmente, la conexión al servidor se abre mediante el reenvío de puertos, por lo que la CLI se puede operar fácilmente de forma remota. (Ejemplo: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Obtén una o más reglas de indexación usando `all` `` para obtener todas las reglas, o `global` para obtener los valores globales predeterminados. Se puede usar un argumento adicional `--merged` para especificar que las reglas específicas de implementación se fusionan con la regla global. Así es como se aplican en el agente Indexador. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - Establece una o más reglas de indexación. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Empieza a indexar un deploy de subgrafo si está disponible y establece su `decisionBasis` en `always`, por lo que el agente Indexador siempre elegirá indexarlo. Si la regla global se establece en siempre, se indexarán todos los subgrafos disponibles en la red. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` - Deja de indexar un deploy y establece tu `decisionBasis` en never (nunca), por lo que omitirá este deploy cuando decida qué deploy indexar. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` - Configura `thedecisionBasis` para un deploy en `rules`, de modo que el agente Indexador use las reglas de indexación para decidir si debe indexar este deploy. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. 
-- `graph indexer action queue allocate ` - Acción de allocation en fila
+- `graph indexer action queue allocate ` - Queue allocation action

-- `graph indexer action queue reallocate ` - Acción de reallocation en fila
+- `graph indexer action queue reallocate ` - Queue reallocate action

-- `graph indexer action queue unallocate ` - Acción de unallocation en fila
+- `graph indexer action queue unallocate ` - Queue unallocate action

-- `graph indexer actions cancel [ ...]` - Cancela todas las acciones de la fila si id no se especifica, en caso contrario cancela una matriz de id con espacio como separador
+- `graph indexer actions cancel [ ...]` - Cancel all actions in the queue if no id is specified; otherwise, cancel an array of ids separated by spaces

-- `graph indexer actions approve [ ...]` - Aprobar múltiples acciones para su ejecución
+- `graph indexer actions approve [ ...]` - Approve multiple actions for execution

-- `graph indexer actions execute approve` - Forzar al trabajador a ejecutar acciones aprobadas inmediatamente
+- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately

-Todos los comandos que muestran reglas en la salida pueden elegir entre los formatos de salida admitidos (`table`, `yaml` y `json`) utilizando el argumento `-output`.
+All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `--output` argument.

-#### Reglas de Indexación
+#### Indexing rules

-Las reglas de indexación se pueden aplicar como valores predeterminados globales o para deploy de subgrafos específicos usando sus ID. Los campos `deployment` y `decisionBasis` son obligatorios, mientras que todos los demás campos son opcionales. Cuando una regla de indexación tiene `rules` como `decisionBasis`, el agente Indexador comparará los valores de umbral no nulos en esa regla con los valores obtenidos de la red para la deploy correspondiente. Si el deploy del subgrafo tiene valores por encima (o por debajo) de cualquiera de los umbrales, se elegirá para la indexación.
+Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds, it will be chosen for indexing.

-Por ejemplo, si la regla global tiene un `minStake` de **5** (GRT), cualquier implementación de subgrafo que tenga más de 5 (GRT) de participación (stake) asignado a él será indexado. Las reglas de umbral incluyen `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake` y `minAverageQueryFees`.
+For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`.
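The threshold logic just described can be pictured compactly. This is an illustrative restatement only - the agent's actual evaluation lives in indexer-agent - but it captures the "any non-null threshold crossed" behavior:

```typescript
// Illustrative restatement of the `rules` decision basis: a deployment is
// picked when any non-null threshold on the rule is crossed.
interface RuleThresholds {
  minStake?: number;
  minSignal?: number;
  maxSignal?: number;
  minAverageQueryFees?: number;
}

interface NetworkValues {
  stake: number;
  signal: number;
  avgQueryFees: number;
}

function matchesRule(r: RuleThresholds, v: NetworkValues): boolean {
  if (r.minStake != null && v.stake >= r.minStake) return true;
  if (r.minSignal != null && v.signal >= r.minSignal) return true;
  if (r.maxSignal != null && v.signal <= r.maxSignal) return true;
  if (r.minAverageQueryFees != null && v.avgQueryFees >= r.minAverageQueryFees) return true;
  return false;
}

// Global rule with minStake = 5 GRT: a deployment with 12 GRT of stake matches.
console.log(matchesRule({ minStake: 5 }, { stake: 12, signal: 0, avgQueryFees: 0 })); // true
```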
-Modelo de Datos: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -Ejemplo de uso de la regla de indexación: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### CLI de la fila de acciones +#### Actions queue CLI -El Indexador-cli proporciona un módulo de `acciones` para trabajar manualmente con la fila de acciones. Utiliza la **API Graphql** alojada en el servidor de gestión del Indexador para interactuar con la fila de acciones. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -El trabajador de ejecución de acciones sólo tomará elementos de la fila para ejecutarlos si tienen `ActionStatus = approved`. En la ruta recomendada, las acciones se añaden a la fila con ActionStatus = queued, por lo que deben ser aprobadas para poder ejecutarse on-chain. El flujo general será el siguiente: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Acción añadida a la fila por la herramienta optimizadora de terceros o el usuario de Indexer-cli -- El Indexador puede utilizar el `indexer-cli` para ver todas las acciones en fila -- Indexador (u otro software) puede aprobar o cancelar acciones en la fila utilizando `indexer-cli`. Los comandos de aprobación y cancelación toman una variedad de ids de acciones como entrada. -- El trabajador de ejecución sondea regularmente la fila en busca de acciones aprobadas. Tomará las acciones `approved` de la fila, intentará ejecutarlas y actualizará los valores en la base de datos en función del estado de la ejecución de `success` o `failed`. -- Si una acción tiene éxito, el trabajador se asegurará de que haya una regla de indexación presente que indique al agente cómo gestionar la allocation en el futuro, lo que es útil cuando se toman acciones manuales mientras el agente está en modo `auto` o de `oversight`. -- El Indexador puede supervisar la fila de acciones para ver un historial de la ejecución de las acciones y, si es necesario, volver a aprobar y actualizar los elementos de acción si fallan en su ejecución. La fila de acciones proporciona un historial de todas las acciones en fila y ejecutadas. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. 
+- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. -Modelo de Datos: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Ejemplo de uso de la fuente: +Example usage from source: ```bash graph indexer actions get all @@ -677,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Ten en cuenta que los tipos de acción admitidos para la gestión de la allocation tienen diferentes requisitos de entrada: +Note that supported action types for allocation management have different input requirements: -- `Allocate` - asignar stake a un deploy de subgrafos específico +- `Allocate` - allocate stake to a specific subgraph deployment - - parámetros de acción requeridos: + - required action params: - deploymentID - - cantidad + - amount -- `Unallocate` - cierra la allocation, liberando el stake para reasignar en otro lugar +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - parámetros de acción requeridos: + - required action params: - allocationID - deploymentID - - parámetros de acción opcionales: + - optional action params: - poi - - force (obliga a utilizar el POI proporcionado aunque no coincida con lo que proporciona el graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - cerrar atómicamente la allocation y abrir una allocation nueva para el mismo deploy de subgrafos +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - parámetros de acción requeridos: + - required action params: - allocationID - deploymentID - - cantidad - - parámetros de acción opcionales: + - amount + - optional action params: - poi - - force (obliga a utilizar el POI proporcionado aunque no coincida con lo que proporciona el graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Modelos de Costos +#### Cost models -Los modelos de costes proporcionan precios dinámicos para las consultas basados en el mercado y los atributos de la consulta. El Servicio de Indexadores comparte un modelo de costes con las gateway para cada subgrafo para el que pretenden responder a las consultas. Las gateway, a su vez, utilizan el modelo de costes para tomar decisiones de selección de Indexadores por consulta y para negociar el pago con los Indexadores elegidos. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -El lenguaje Agora proporciona un formato flexible para declarar modelos de costos para consultas. Un modelo de precios de Agora es una secuencia de declaraciones que se ejecutan en orden para cada consulta de nivel superior en una consulta GraphQL. Para cada consulta de nivel superior, la primera declaración que coincide con ella determina el precio de esa consulta. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. 
For each top-level query, the first statement which matches it determines the price for that query. -Una declaración se compone de un predicado, que se utiliza para hacer coincidir consultas GraphQL, y una expresión de costo que, cuando se evalúa, genera un costo en GRT decimal. Los valores en la posición del argumento nombrado de una consulta pueden capturarse en el predicado y usarse en la expresión. Los globales también se pueden establecer y sustituir por marcadores de posición en una expresión. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Ejemplo de modelo de costos: +Example cost model: ``` # This statement captures the skip value, @@ -727,64 +727,64 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -Ejemplo de modelo de costo usando el modelo anterior: +Example query costing using the above model: -| Consulta | Precio | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | -| { pairs(skip: 5000) { id } } | 0,5 GRT | -| { tokens { symbol } } | 0,1 GRT | -| { pairs(skip: 5000) { id } tokens { symbol } } | 0,6 GRT | +| { pairs(skip: 5000) { id } } | 0.5 GRT | +| { tokens { symbol } } | 0.1 GRT | +| { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Aplicación del modelo de costos +#### Applying the cost model -Los modelos de costos se aplican a través de la CLI del Indexador, que los pasa a la API de Administración de Indexador del agente Indexador para almacenarlos en la base de datos. Luego, el Servicio del Indexador los recogerá y entregará los modelos de costos a las gateway siempre que los soliciten. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interactuar con la red +## Interacting with the network -### Stake en el protocolo +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -Una vez que un Indexador ha stakeado GRT en el protocolo, los [Indexer components](/indexing/overview/#indexer-components) pueden iniciarse y comenzar sus interacciones con la red. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Aprobar tokens +#### Approve tokens -1. Abre la [Remix app](https://remix.ethereum.org/) en un navegador +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. 
En el `File Explorer`, crea un archivo llamado **GraphToken.abi** con [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. En entorno, selecciona `Injected Web3` y en `Account` selecciona tu dirección de Indexador. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Establece la dirección del contrato GraphToken: pega la dirección del contrato GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) junto a `At Address` y haz clic en el botón `At address` para aplicar. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Llame a la función `approve(spender, amount)` para aprobar el contrato de Staking. Completa `spender` con la dirección del contrato de Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) y `amount` con los tokens en stake (en wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Staking de tokens +#### Stake tokens -1. Abre la [Remix app](https://remix.ethereum.org/) en un navegador +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. En el `File Explorer`, crea un archivo llamado **Staking.abi** con la ABI de staking. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. En entorno, selecciona `Injected Web3` y en `Account` selecciona tu dirección de Indexador. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Establece la dirección del contrato de staking - Pega la dirección del contrato de Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) junto a `At Address` y haz clic en el botón `At address` para aplicar. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. Llama a `stake()` para stakear GRT en el protocolo. +6. Call `stake()` to stake GRT in the protocol. -7. (Opcional) Los Indexadores pueden aprobar otra dirección para que sea el operador de su infraestructura de indexación a fin de separar las claves que controlan los fondos de las que realizan acciones cotidianas, como la asignación en subgrafos y el servicio de consultas (pagadas). Para configurar el operador, llama a `setOperator()` con la dirección del operador. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. -8. 
(Opcional) Para controlar la distribución de recompensas y atraer estratégicamente a los Delegadores, los indexadores pueden actualizar sus parámetros de delegación actualizando su indexingRewardCut (partes por millón), queryFeeCut (partes por millón) y cooldownBlocks (número de bloques). Para hacerlo, llama a `setDelegationParameters()`. El siguiente ejemplo establece queryFeeCut para distribuir el 95% de los reembolsos de consultas al Indexador y el 5% a los Delegadores, establece indexingRewardCut para distribuir el 60% de las recompensas de indexación al Indexador y el 40% a los Delegadores, y establece `thecooldownBlocks` período a 500 bloques.
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

To set the delegation parameters using the Graph Explorer interface, follow these steps:

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### La vida de una allocation
+### The life of an allocation

After being created by an Indexer, a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI), their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-Se recomienda a los Indexadores que utilicen la funcionalidad de sincronización fuera de la cadena para sincronizar el deploy de subgrafos con el cabezal de la cadena antes de crear la allocation on-chain. Esta función es especialmente útil para subgrafos que pueden tardar más de 28 épocas en sincronizarse o que tienen algunas posibilidades de fallar de forma indeterminada.
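Returning to step 8 above: since the contract expects parts-per-million values, a tiny helper makes the units harder to get wrong. This is a sketch only, and it assumes the three-argument `setDelegationParameters()` shown in the example:

```typescript
// Convert human-readable percentages into the parts-per-million (PPM)
// values expected by setDelegationParameters(). Sketch only - verify the
// argument order against the Staking contract ABI you are calling.
const toPPM = (percent: number): number => Math.round(percent * 10_000);

const queryFeeCut = toPPM(95);       // 950000
const indexingRewardCut = toPPM(60); // 600000
const cooldownBlocks = 500;

console.log(queryFeeCut, indexingRewardCut, cooldownBlocks);
// -> the arguments used above: setDelegationParameters(950000, 600000, 500)
```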
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From d0dd2dd6f51362f87e154bea761685f9739aa3c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:35 -0500 Subject: [PATCH 0072/1534] New translations overview.mdx (Arabic) --- website/src/pages/ar/indexing/overview.mdx | 288 ++++++++++----------- 1 file changed, 144 insertions(+), 144 deletions(-) diff --git a/website/src/pages/ar/indexing/overview.mdx b/website/src/pages/ar/indexing/overview.mdx index b619e448d436..a665457d5352 100644 --- a/website/src/pages/ar/indexing/overview.mdx +++ b/website/src/pages/ar/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexing +title: Indexing Overview +sidebarTitle: نظره عامة --- Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. @@ -8,7 +9,7 @@ GRT that is staked in the protocol is subject to a thawing period and can be sla يختار المفهرسون subgraphs للقيام بالفهرسة بناء على إشارة تنسيق subgraphs ، حيث أن المنسقون يقومون ب staking ل GRT وذلك للإشارة ل Subgraphs عالية الجودة. يمكن أيضا للعملاء (مثل التطبيقات) تعيين بارامترات حيث يقوم المفهرسون بمعالجة الاستعلامات ل Subgraphs وتسعير رسوم الاستعلام. -## الأسئلة الشائعة +## FAQ ### What is the minimum stake required to be an Indexer on the network? @@ -26,11 +27,11 @@ Indexing rewards come from protocol inflation which is set to 3% annual issuance Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### ما هو إثبات الفهرسة (POI)؟ +### What is a proof of indexing (POI)? POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### متى يتم توزيع مكافآت الفهرسة؟ +### When are indexing rewards distributed? Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). 
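For Indexers who prefer scripting over the Etherscan walkthrough in the next section, a sketch along the following lines may work. It assumes ethers v6 and the single-argument `getRewards(address)` signature implied by the docs - check the ABI on the RewardsManager proxy before relying on it:

```typescript
import { ethers } from "ethers";

// Sketch: read pending rewards for one allocation from the RewardsManager
// proxy on Ethereum mainnet. Address as given in the section below.
const REWARDS_MANAGER = "0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66";
const abi = ["function getRewards(address allocationID) view returns (uint256)"];

async function pendingRewards(allocationID: string, rpcUrl: string) {
  const provider = new ethers.JsonRpcProvider(rpcUrl);
  const rewardsManager = new ethers.Contract(REWARDS_MANAGER, abi, provider);
  const wei = await rewardsManager.getRewards(allocationID);
  return ethers.formatEther(wei); // GRT has 18 decimals, like ETH
}
```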
@@ -38,46 +39,45 @@ Allocations are continuously accruing rewards while they're active and allocated The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -تشتمل العديد من لوحات المعلومات التي أنشأها المجتمع على قيم المكافآت المعلقة ويمكن التحقق منها بسهولة يدويًا باتباع الخطوات التالية: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql -} query indexerAllocations -} indexer(id: "") - } allocations - } activeForIndexer - } allocations +query indexerAllocations { + indexer(id: "") { + allocations { + activeForIndexer { + allocations { id - { - { - { - { -{ + } + } + } + } +} ``` -استخدم Etherscan لاستدعاء `()getRewards`: - -- انتقل إلى [ واجهة Etherscan لعقد المكافآت Rewards contract ](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +Use Etherscan to call `getRewards()`: -* لاستدعاء `getRewards()`: +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - - أدخل ** معرّف التخصيص ** في الإدخال. - - انقر فوق الزر ** الاستعلام **. + - Enter the **allocationID** in the input. + - Click the **Query** button. -### ما هي الاعتراضات disputes وأين يمكنني عرضها؟ +### What are disputes and where can I view them? -يمكن الاعتراض على استعلامات المفهرس وتخصيصاته على The Graph أثناء فترة الاعتراض dispute. تختلف فترة الاعتراض حسب نوع الاعتراض. تحتوي الاستعلامات / الشهادات Queries/attestations على نافذة اعتراض لـ 7 فترات ، في حين أن المخصصات لها 56 فترة. بعد مرور هذه الفترات ، لا يمكن فتح اعتراضات ضد أي من المخصصات أو الاستعلامات. عند فتح الاعتراض ، يجب على الصيادين Fishermen إيداع على الأقل 10000 GRT ، والتي سيتم حجزها حتى يتم الانتهاء من الاعتراض وتقديم حل. الصيادون Fisherman هم المشاركون في الشبكة الذين يفتحون الاعتراضات. +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. -يمكنك عرض الاعتراضات من واجهة المستخدم في صفحة ملف تعريف المفهرس وذلك من علامة التبويب `Disputes`. +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. -- إذا تم رفض الاعتراض، فسيتم حرق GRT المودعة من قبل ال Fishermen ، ولن يتم شطب المفهرس المعترض عليه. -- إذا تمت تسوية الاعتراض بالتعادل، فسيتم إرجاع وديعة ال Fishermen ، ولن يتم شطب المفهرس المعترض عليه. -- إذا تم قبول الاعتراض، فسيتم إرجاع GRT التي أودعها الFishermen ، وسيتم شطب المفهرس المعترض عليه وسيكسب Fishermen ال 50٪ من GRT المشطوبة. +- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. 
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -يمكن عرض الاعتراضات في واجهة المستخدم في بروفايل المفهرس ضمن علامة التبويب `Disputes`. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -### ما هي خصومات رسوم الاستعلام ومتى يتم توزيعها؟ +### What are query fee rebates and when are they distributed? Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. @@ -93,29 +93,29 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that ### How do Indexers know which subgraphs to index? -من خلال تطبيق تقنيات متقدمة لاتخاذ قرارات فهرسة ال subgraph ، وسنناقش العديد من المقاييس الرئيسية المستخدمة لتقييم ال subgraphs في الشبكة: +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -- **إشارة التنسيق Curation signal** ـ تعد نسبة إشارة تنسيق الشبكة على subgraph معين مؤشرا جيدا على الاهتمام بهذا ال subgraph، خاصة أثناء المراحل الأولى عندما يزداد حجم الاستعلام. +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **مجموعة رسوم الاستعلام Query fees collected** ـ تعد البيانات التاريخية لحجم مجموعة رسوم الاستعلام ل subgraph معين مؤشرا جيدا للطلب المستقبلي. +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. - **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **ال Subgraphs التي بدون مكافآت فهرسة** ـ بعض الsubgraphs لا تنتج مكافآت الفهرسة بشكل أساسي لأنها تستخدم ميزات غير مدعومة مثل IPFS أو لأنها تستعلم عن شبكة أخرى خارج الشبكة الرئيسية mainnet. سترى رسالة على ال subgraph إذا لا تنتج مكافآت فهرسة. +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -### ما هي المتطلبات للهاردوير؟ +### What are the hardware requirements? -- **صغيرة**ـ يكفي لبدء فهرسة العديد من ال subgraphs، من المحتمل أن تحتاج إلى توسيع. -- ** قياسية ** - هو الإعداد الافتراضي ، ويتم استخدامه في مثال بيانات نشر k8s / terraform. +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. 
+- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. - **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **كبيرة** - مُعدة لفهرسة جميع ال subgraphs المستخدمة حاليا وأيضا لخدمة طلبات حركة مرور البيانات ذات الصلة. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| صغير | 4 | 8 | 1 | 4 | 16 | -| قياسي | 8 | 30 | 1 | 12 | 48 | -| متوسط | 16 | 64 | 2 | 32 | 64 | -| كبير | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -123,7 +123,7 @@ The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that - **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## البنية الأساسية +## Infrastructure At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. @@ -133,52 +133,52 @@ At the center of an Indexer's infrastructure is the Graph Node which monitors th - **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. -- **خدمة المفهرس Indexer service**- يتعامل مع جميع الاتصالات الخارجية المطلوبة مع الشبكة. ويشارك نماذج التكلفة وحالات الفهرسة ، ويمرر طلبات الاستعلام من البوابات gateways إلى Graph Node ، ويدير مدفوعات الاستعلام عبر قنوات الحالة مع البوابة. +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **Prometheus metrics server** - مكونات The Graph Node والمفهرس يسجلون مقاييسهم على سيرفر المقاييس. +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -ملاحظة: لدعم القياس السريع ، يستحسن فصل الاستعلام والفهرسة بين مجموعات مختلفة من العقد Nodes: عقد الاستعلام وعقد الفهرس. +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -### نظرة عامة على المنافذ Ports +### Ports overview > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Graph Node -| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...

/subgraphs/name/.../... | --http-port | - |
-| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...

/subgraphs/name/.../... | --ws-port | - |
-| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### خدمة المفهرس +#### Indexer Service -| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus metrics | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- |
+| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### وكيل المفهرس(Indexer Agent) +#### Indexer Agent -| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | -| ------ | ----------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | API إدارة المفهرس | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### قم بإعداد البنية الأساسية للسيرفر باستخدام Terraform على جوجل كلاود +### Setup server infrastructure using Terraform on Google Cloud -> ملاحظة: يمكن للمفهرسين كبديل استخدام خدمات أمازون ويب، أو مايكروسوفت أزور، أو علي بابا. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### متطلبات التثبيت +#### Install prerequisites - Google Cloud SDK -- أداة سطر أوامر Kubectl +- Kubectl command line tool - Terraform -#### أنشئ مشروع Google Cloud +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ At the center of an Indexer's infrastructure is the Graph Node which monitors th cd terraform ``` -- قم بالتوثيق بواسطة Google Cloud وأنشئ مشروع جديد. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- استخدم [صفحة الفوترة] في Google Cloud Console لتمكين الفوترة للمشروع الجديد. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- قم بإنشاء Google Cloud configuration. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- قم بتفعيل Google Cloud APIs المطلوبة. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- قم بإنشاء حساب الخدمة حساب الخدمة. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- قم بتفعيل ال peering بين قاعدة البيانات ومجموعة Kubernetes التي سيتم إنشاؤها في الخطوة التالية. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,22 +249,22 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- قم بإنشاء الحد الأدنى من ملف التهيئة ل terraform (التحديث حسب الحاجة). +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < **ملاحظة**: جميع متغيرات الإعدادات الخاصة بوقت التشغيل يمكن تطبيقها إما كبارامترات للأمر عند بدء التشغيل أو باستخدام متغيرات البيئة بالتنسيق `COMPONENT_NAME_VARIABLE_NAME` (على سبيل المثال `INDEXER_AGENT_ETHEREUM`). +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. 
`INDEXER_AGENT_ETHEREUM`). -#### وكيل المفهرس(Indexer Agent) +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### خدمة المفهرس Indexer service +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -514,9 +514,9 @@ graph-indexer-service start \ | pino-pretty ``` -#### CLI المفهرس +#### Indexer CLI -CLI المفهرس هو مكون إضافي لـ [`graphprotocol/graph-cli@`](https://www.npmjs.com/package/@graphprotocol/graph-cli) يمكن الوصول إليه عند `graph indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 @@ -527,7 +527,7 @@ graph indexer status The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### الاستخدام +#### Usage The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. @@ -535,11 +535,11 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - قم بتعيين قاعدة أو أكثر من قواعد الفهرسة. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. - `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` - توقف عن فهرسة النشر deployment وقم بتعيين ملف `decisionBasis` إلىnever أبدًا ، لذلك سيتم تخطي هذا النشر عند اتخاذ قرار بشأن عمليات النشر للفهرسة. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. - `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. 
@@ -557,15 +557,15 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -جميع الأوامر التي تعرض القواعد في الخرج output يمكنها الاختيار بين تنسيقات الإخراج المدعومة (`table`, `yaml`, `json`) باستخدام `-output` argument. +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### قواعد الفهرسة +#### Indexing rules Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -على سبيل المثال ، إذا كانت القاعدة العامة لديها`minStake` من ** 5 ** (GRT) ، فأي نشر subgraph به أكثر من 5 (GRT) من الحصة المخصصة ستتم فهرستها. قواعد العتبة تتضمن `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. -نموذج البيانات Data model: +Data model: ```graphql type IndexingRule { @@ -615,7 +615,7 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: - Action added to the queue by the 3rd party optimizer tool or indexer-cli user - Indexer can use the `indexer-cli` to view all queued actions @@ -624,7 +624,7 @@ The action execution worker will only grab items from the queue to execute if th - If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. - The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
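For illustration, a minimal sketch of this flow using the CLI commands described above — the deployment ID is the one reused in this guide's rule examples, while the allocation amount and the action ID are hypothetical:

```sh
# Queue an allocation action (allocation amount in GRT is hypothetical)
graph indexer action queue allocate QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK 5000

# List everything currently waiting for approval
graph indexer actions get --status queued

# Approve the queued action (assuming it was assigned id 1), then force execution
graph indexer actions approve 1
graph indexer actions execute approve
```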
-نموذج البيانات Data model: +Data model: ```graphql Type ActionInput { @@ -704,17 +704,17 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### نماذج التكلفة Cost models +#### Cost models Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -توفر لغة Agora تنسيقا مرنا للإعلان عن نماذج التكلفة للاستعلامات. نموذج سعر Agora هو سلسلة من العبارات التي يتم تنفيذها بالترتيب لكل استعلام عالي المستوى في GraphQL. بالنسبة إلى كل استعلام عالي المستوى top-level ، فإن العبارة الأولى التي تتطابق معه تحدد سعر هذا الاستعلام. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -تتكون العبارة من المسند predicate ، والذي يستخدم لمطابقة استعلامات GraphQL وتعبير التكلفة والتي عند تقييم النواتج تكون التكلفة ب GRT عشري. قيم الاستعلام الموجودة في ال argument ،قد يتم تسجيلها في المسند predicate واستخدامها في التعبير expression. يمكن أيضًا تعيين Globals وتعويضه في التعبير expression. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -مثال لتكلفة الاستعلام باستخدام النموذج أعلاه: +Example cost model: ``` # This statement captures the skip value, @@ -727,24 +727,24 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -مثال على نموذج التكلفة: +Example query costing using the above model: -| الاستعلام | السعر | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### تطبيق نموذج التكلفة +#### Applying the cost model Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh -'indexer cost set variables '{ "SYSTEM_LOAD": 1.4 } +indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## التفاعل مع الشبكة +## Interacting with the network ### Stake in the protocol @@ -754,33 +754,33 @@ The first steps to participating in the network as an Indexer are to approve the Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### اعتماد التوكن tokens +#### Approve tokens -1. افتح [ تطبيق Remix ](https://remix.ethereum.org/) على المتصفح +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. 
في `File Explorer` أنشئ ملفا باسم ** GraphToken.abi ** باستخدام [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. 4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. قم بتعيين عنوان GraphToken - الصق العنوان (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) بجوار `At Address` وانقر على الزر `At address` لتطبيق ذلك. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. استدعي دالة `approve(spender, amount)` للموافقة على عقد Staking. املأ `spender` بعنوان عقد Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) واملأ `amount` بالتوكن المراد عمل staking لها (في wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). #### Stake tokens -1. افتح [ تطبيق Remix ](https://remix.ethereum.org/) على المتصفح +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. في `File Explorer` أنشئ ملفا باسم ** Staking.abi ** باستخدام Staking ABI. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. 4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. عيّن عنوان عقد Staking - الصق عنوان عقد Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) بجوار `At address` وانقر على الزر `At address` لتطبيق ذلك. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. استدعي `stake()` لوضع GRT في البروتوكول. +6. Call `stake()` to stake GRT in the protocol. 7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. @@ -806,12 +806,12 @@ To set the delegation parameters using Graph Explorer interface, follow these st > Note: This transaction will need to be confirmed by the multisig wallet signers. -### عمر التخصيص allocation +### The life of an allocation After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. 
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 3abbfb0efa17eaff4e6a779b9c1c3513feeb4a4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:36 -0500 Subject: [PATCH 0073/1534] New translations overview.mdx (Czech) --- website/src/pages/cs/indexing/overview.mdx | 426 ++++++++++----------- 1 file changed, 213 insertions(+), 213 deletions(-) diff --git a/website/src/pages/cs/indexing/overview.mdx b/website/src/pages/cs/indexing/overview.mdx index 167e949056bc..cf26f9abf9bd 100644 --- a/website/src/pages/cs/indexing/overview.mdx +++ b/website/src/pages/cs/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexování +title: Indexing Overview +sidebarTitle: Přehled --- Indexery jsou operátoři uzlů v síti Graf, kteří sázejí graf tokeny (GRT), aby mohli poskytovat služby indexování a zpracování dotazů. Indexátoři za své služby získávají poplatky za dotazy a odměny za indexování. Získávají také poplatky za dotazy, které jsou vráceny podle exponenciální funkce vrácení. @@ -10,35 +11,35 @@ Indexátory vybírají podgrafy k indexování na základě signálu kurátorů ## FAQ -### Jaký je minimální podíl potřebný k tomu, abyste se mohli stát indexátorem v síti? +### What is the minimum stake required to be an Indexer on the network? -Minimální vklad pro indexátora je v současné době nastaven na 100k GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### Jaké jsou toky příjmů pro indexátora? +### What are the revenue streams for an Indexer? -**Slevy z poplatků za dotazy** - Platby za obsluhu dotazů v síti. Tyto platby jsou zprostředkovány prostřednictvím stavových kanálů mezi indexerem a bránou. Každý dotazový požadavek z brány obsahuje platbu a odpovídající odpověď doklad o platnosti výsledku dotazu. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. 
Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Odměny za indexování** - Odměny za indexování, generované prostřednictvím 3% roční inflace v rámci celého protokolu, jsou rozdělovány indexátorům, kteří indexují rozmístění podgrafů pro síť. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### Jak se rozdělují odměny za indexaci? +### How are indexing rewards distributed? -Odměny za indexaci pocházejí z protokolární inflace, která je stanovena na 3 % ročně. Rozdělují se mezi podgrafy na základě podílu všech kurátorských signálů na každém z nich a poté se poměrně rozdělí indexátorům na základě jejich přiděleného podílu na daném podgrafu. **Alokace musí být uzavřena platným důkazem indexace (POI), který splňuje standardy stanovené rozhodčí chartou, aby bylo možné získat odměny.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### Co je Důkaz indexování (POI)? +### What is a proof of indexing (POI)? -V síti se používají Proofy indexování (POI) k ověření, zda indexátor skutečně indexuje podgrafy, na které má alokovanou kapacitu. Pro uzavření alokace a možnost získat odměny za indexování je nutné při uzavírání alokace předložit POI pro první blok aktuální epochy. POI pro daný blok je souhrn všech transakcí v úložišti entit pro konkrétní nasazení podgrafu, a to až do tohoto bloku včetně. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### Kdy se rozdělují odměny za indexaci? +### When are indexing rewards distributed? -Za přidělení se průběžně připisují odměny, dokud jsou aktivní a přidělené během 28 epoch. Odměny jsou shromažďovány indexátory a rozdělovány vždy, když jsou jejich alokace uzavřeny. To se děje buď ručně, kdykoli je chce indexátor násilně uzavřít, nebo po 28 epochách může alokaci za indexátora uzavřít delegát, což však nevede k žádným odměnám. 28 epoch je maximální doba životnosti alokace (právě teď trvá jedna epocha ~24 hodin). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. 
That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### Lze sledovat čekající odměny za indexaci? +### Can pending indexing rewards be monitored? The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -Mnoho informačních panelů vytvořených komunitou obsahuje hodnoty čekajících odměn a lze je snadno zkontrolovat ručně podle následujících kroků: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: @@ -56,129 +57,128 @@ query indexerAllocations { } ``` -Pomocí funkce Etherscan zavolejte `getRewards()`: +Use Etherscan to call `getRewards()`: -- Přejděte na [Etherscan rozhraní na smlouvu odměny](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* Volání funkce `getRewards()`: +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - - Do vstupu zadejte **allocationID**. - - Klikněte na tlačítko **Dotaz**. + - Enter the **allocationID** in the input. + - Click the **Query** button. -### Co jsou to spory a kde si je mohu prohlédnout? +### What are disputes and where can I view them? -Dotazy indexátoru i alokace lze v grafu zpochybnit během sporného období. Období sporu se liší v závislosti na typu sporu. Dotazy/atesty mají 7 epochové sporné okno, zatímco alokace mají 56 epoch. Po uplynutí těchto období nelze zahájit spor ani proti alokacím, ani proti dotazům. Při zahájení sporu musí rybáři složit zálohu v minimální výši 10,000 GRT, která bude zablokována až do ukončení sporu a vydání rozhodnutí. Rybáři jsou všichni účastníci sítě, kteří otevírají spory. +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. -Spory mají **tři** možné výsledky, stejně tak vklad rybářů. +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. -- Pokud bude spor zamítnut, GRT složené rybáři budou spáleny a sporný indexátor nebude krácen. -- Pokud je spor vyřešen nerozhodně, bude Fishermen's vklad vrácen a sporný indexátor nebude penalizován snížením stake. -- Pokud je spor uznán, bude Fishermen's vklad vrácen, sporný indexátor bude penalizován snížením stake a Fishermen obdrží 50 % sníženého stake. 
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -Spory lze zobrazit v UI na stránce profilu indexátora na kartě `Spory`. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -### Co jsou to slevy z poplatků za dotaz a kdy se rozdělují? +### What are query fee rebates and when are they distributed? -Poplatky za dotazy vybírá brána a rozděluje je indexátorům podle exponenciální funkce rabatu (viz GIP [zde](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). Exponenciální funkce rabatu je navržena jako způsob, jak zajistit, aby indexátory dosáhly nejlepšího výsledku věrným obsloužením dotazů. Funguje tak, že motivuje indexátory, aby přidělovali velké množství podílu (který může být snížen za chybu při obsluze dotazu) v poměru k výši poplatků za dotazy, které mohou inkasovat. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Po uzavření přídělu může indexátor požádat o slevy. Po uplatnění nároku jsou slevy z poplatku za dotaz rozděleny mezi indexátora a jeho delegáty na základě snížení poplatku za dotaz a exponenciální funkce slevy. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### Co je to snížení poplatku za dotaz a snížení odměny za indexaci? +### What is query fee cut and indexing reward cut? -Hodnoty `queryFeeCut` a `indexingRewardCut` jsou parametry delegování, které může indexátor nastavit spolu s bloky cooldownBlocks a řídit tak rozdělení GRT mezi indexátor a jeho delegáty. Pokyny k nastavení parametrů delegování naleznete v posledních krocích v části [Zadání protokolu](/indexing/overview/#stake-in-the-protocol). +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - % slev z poplatků za dotaz, které budou rozděleny mezi indexátory. Pokud je tato hodnota nastavena na 95%, obdrží indexátor 95% poplatků za dotaz získaných při uzavření přídělu a zbylých 5% připadne delegátům. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. 
-- **indexingRewardCut** - % odměn za indexování, které budou rozděleny indexátoru. Pokud je tato hodnota nastavena na 95 %, obdrží indexátor při uzavření přídělu 95 % odměn za indexování a zbylých 5 % si rozdělí delegáti. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### Jak indexátory poznají, které podgrafy mají indexovat? +### How do Indexers know which subgraphs to index? -Indexátory se mohou odlišovat použitím pokročilých technik pro rozhodování o indexaci podgrafů, ale pro obecnou představu probereme několik klíčových metrik používaných k hodnocení podgrafů v síti: +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -- **Signál kurátorství** - Podíl signálu kurátorství sítě aplikovaného na určitý podgraf je dobrým ukazatelem zájmu o tento podgraf, zejména během zaváděcí fáze, kdy se zvyšuje objem dotazů. +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Vybrané poplatky za dotazy** - Historické údaje o objemu vybraných poplatků za dotazy pro určitý podgraf jsou dobrým ukazatelem budoucí poptávky. +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **Amount staked** - Sledování chování ostatních indexátorů nebo podílů celkového vkladu přiděleného konkrétním podgrafům může indexátoru umožnit sledovat stranu nabídky pro dotazy na podgrafy a identifikovat podgrafy, kterým síť důvěřuje, nebo podgrafy, které mohou vykazovat potřebu větší nabídky. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **Podgrafy bez odměn za indexování** - Některé podgrafy negenerují odměny za indexování především proto, že používají nepodporované funkce, jako je IPFS, nebo protože se dotazují na jinou síť mimo hlavní síť. Pokud podgraf negeneruje odměny za indexování, zobrazí se u něj tato zpráva. +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -### Jaké jsou požadavky na hardware? +### What are the hardware requirements? -- **Malý** - Dostatečný pro začátek indexování několika podgrafů, pravděpodobně bude třeba jej rozšířit. -- **Standard** - Výchozí nastavení, které je použito v ukázkových manifestech nasazení k8s/terraform. -- **Střední** - produkční indexer podporující 100 podgrafů a 200-500 požadavků za sekundu. -- **Large** - Připraveno k indexování všech aktuálně nepoužívaných příbuzných podgrafů. +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. 
+- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Nastavení | Postgres
(CPUs) | Postgres
(paměť v GBs) | Postgres
(disk v TBs) | VMs
(CPUs) | VMs
(paměť v GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Malé | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Střední | 16 | 64 | 2 | 32 | 64 | -| Velký | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -### Jaká jsou základní bezpečnostní opatření, která by měl indexátor přijmout? +### What are some basic security precautions an Indexer should take? -- **Peněženka operátora** - Nastavení peněženky operátora je důležitým opatřením, protože umožňuje indexátorovi udržovat oddělení mezi klíči, které kontrolují sázky, a klíči, které řídí každodenní operace. Pokyny naleznete v části [Podíl na protokolu](/indexing/overview/#stake-in-the-protocol). +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **Firewall** - Pouze služba Indexer musí být vystavena veřejně a zvláštní pozornost by měla být věnována uzamčení portů pro správu a přístupu k databázi: koncový bod JSON-RPC uzlu Graf (výchozí port: 8030), koncový bod API pro správu Indexeru (výchozí port: 18000) a koncový bod databáze Postgres (výchozí port: 5432) by neměly být vystaveny. +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## Infrastruktura +## Infrastructure -Centrem infrastruktury indexeru je uzel Graf, který monitoruje indexované sítě, extrahuje a načítá data podle definice podgrafu a poskytuje je jako [GraphQL API](/about/#how-the-graph-works). Uzel Graf musí být připojen ke koncovému bodu vystavujícímu data z každé indexované sítě, uzlu IPFS pro získávání dat, databázi PostgreSQL pro jejich ukládání a komponentám Indexeru, které usnadňují jeho interakci se sítí. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **Databáze PostgreSQL** - Hlavní úložiště pro uzel Graf kde jsou uložena data dílčích grafů. Služba Indexer a agent také používají databázi k ukládání dat stavového kanálu, nákladových modelů, indexačních pravidel a alokačních akcí. +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Datový koncový bod** - Pro sítě kompatibilní s EVM musí být graf uzel připojen ke koncovému bodu, který vystavuje API JSON-RPC kompatibilní s EVM. To může mít podobu jediného klienta nebo může jít o složitější nastavení, které vyrovnává zátěž mezi více. Je důležité si uvědomit, že některé dílčí grafy budou vyžadovat konkrétní schopnosti klienta, jako je archivační režim a/nebo API pro sledování parity. 
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **Vuzel IPFS (verze menší než 5)** - Metadata nasazení podgrafů jsou uložena v síti IPFS. Uzel Graf přistupuje během nasazení podgrafu primárně k uzlu IPFS, aby načetl manifest podgrafu a všechny propojené soubory. Síťové indexátory nemusí hostovat vlastní uzel IPFS, uzel IPFS pro síť je hostován na adrese https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. -- **Služba indexeru** - Zpracovává veškerou požadovanou externí komunikaci se sítí. Sdílí nákladové modely a stavy indexace, předává požadavky na dotazy z bran na uzel Graga spravuje platby za dotazy prostřednictvím stavových kanálů s branou. +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Usnadňuje interakce indexerů v řetězci, včetně registrace v síti, správy nasazení podgrafů do jejich grafových uzlů a správy alokací. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **Server metrik Prometheus** - Komponenty Uzel grafu a Indexer zaznamenávají své metriky na metrický server. +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -Poznámka: Pro podporu agilního škálování se doporučuje oddělit dotazování a indexování mezi různé sady uzlů: dotazovací uzly a indexovací uzly. +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -### Přehled portů +### Ports overview -> **Důležité**: Dávejte pozor na veřejné vystavování portů - **administrační porty** by měly být uzamčeny. To zahrnuje koncové body JSON-RPC uzlu Graf a koncové body správy Indexeru, které jsou podrobně popsány níže. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Uzel Graf -| Port | Účel | Trasy | CLI Argument | Proměnná prostředí | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(pro dotazy podgrafy) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(pro odběry podgrafů) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(pro správu nasazení) | / | --admin-port | - | -| 8030 | Stav indexování podgrafů API | /graphql | --index-node-port | - | -| 8040 | Metriky Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Služba Indexer +#### Indexer Service -| Port | Účel | Trasy | CLI Argument | Proměnná prostředí | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(pro placené dotazy na podgrafy) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Metriky Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Agent indexátoru +#### Indexer Agent -| Port | Účel | Trasy | CLI Argument | Proměnná prostředí | -| ---- | ------------------------- | ----- | ------------------------- | --------------------------------------- | -| 8000 | API pro správu indexátoru | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Nastavení serverové infrastruktury pomocí Terraformu ve Google Cloud +### Setup server infrastructure using Terraform on Google Cloud -> Poznámka: Indexéry mohou alternativně používat AWS, Microsoft Azure nebo Alibaba. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Instalace předpokladů +#### Install prerequisites - Google Cloud SDK -- Nástroj příkazového řádku Kubectl +- Kubectl command line tool - Terraform -#### Vytvoření projektu Google Cloud +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Poznámka: Pro podporu agilního škálování se doporučuje oddělit dotazová cd terraform ``` -- Ověřte se pomocí služby Google Cloud a vytvořte nový projekt. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Pomocí fakturační stránky konzoly Google Cloud Console povolte fakturaci pro nový projekt. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Vytvořte konfiguraci služby Google Cloud. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Povolte požadované Google Cloud API. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Vytvoření účtu služby. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Povolte peering mezi databází a clusterem Kubernetes, který bude vytvořen v dalším kroku. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,22 +249,22 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Vytvořte minimální konfigurační soubor terraformu (aktualizujte jej podle potřeby). +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < **POZNÁMKA**: Všechny konfigurační proměnné za běhu mohou být použity buď jako parametry příkazu při spuštění, nebo pomocí proměnných prostředí ve formátu `NÁZEV_PŘÍKAZU_VARIABLE_NAME`(např. `INDEXER_AGENT_ETHEREUM`). 
+> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). -#### Agent indexátoru +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Služba Indexer +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -516,56 +516,56 @@ graph-indexer-service start \ #### Indexer CLI -Indexer CLI je zásuvný modul pro [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) přístupný v terminál na adrese `graf indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Správa indexeru pomocí Indexer CLI +#### Indexer management using Indexer CLI -Navrhovaným nástrojem pro interakci s **Indexer Management API** je **Indexer CLI**, rozšíření **Graph CLI**. Agent Indexer potřebuje vstup od Indexeru, aby mohl autonomně komunikovat se sítí jménem Indexeru. Mechanismem pro definování chování agenta Indexer jsou **režim správy přidělování** a **pravidla indexování**. V automatickém režimu může indexátor použít **indexační pravidla** k použití své specifické strategie pro výběr podgrafů k indexování a obsluze dotazů. Pravidla jsou spravována prostřednictvím GraphQL API obsluhovaného agentem a známého jako rozhraní API pro správu indexátoru. V manuálním režimu může indexátor vytvářet alokační akce pomocí **akční fronty** a explicitně je schvalovat před jejich provedením. V režimu dohledu se k naplnění **akční fronty** používají **indexační pravidla**, která rovněž vyžadují explicitní schválení pro provedení. +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Použití +#### Usage -**Indexer CLI** se připojuje k agentovi Indexer, obvykle prostřednictvím přesměrování portů, takže CLI nemusí běžet na stejném serveru nebo clusteru. Abychom vám usnadnili začátek a poskytli vám určitý kontext, bude zde CLI stručně popsáno. +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `graf indexer připojit ` - Připojení k API pro správu indexeru. Obvykle se připojení k serveru otevírá pomocí přesměrování portů, takže CLI lze snadno ovládat na dálku. (Příklad: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. 
Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `pravidla indexování grafů získat [možnosti] [ ...]` - Získá jedno nebo více indexovacích pravidel pomocí `all` jako `` pro získání všech pravidel nebo `global` pro získání globálních výchozích hodnot. Pomocí doplňkového argumentu `--merged` lze určit, že pravidla specifická pro nasazení budou sloučena s globálním pravidlem. Takto se použijí v agentu Indexer. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `pravidla indexování grafů získat [možnosti] ...` -Nastavení jednoho nebo více pravidel indexování. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `pravidla indexování grafů získat [možnosti] ` - Spustí indexování nasazení podgrafů, pokud je k dispozici, a nastaví jeho `decisionBasis` na `always`, takže agent Indexer vždy zvolí jeho indexování. Pokud je globální pravidlo nastaveno na vždy, pak budou indexovány všechny dostupné podgrafy v síti. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `zastavení pravidel indexování grafů [možnosti]` - Zastaví indexování nasazení a nastaví jeho `decisionBasis` na never, takže při rozhodování o nasazeních k indexování toto nasazení přeskočí. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `možná pravidla indexování grafů [možnosti] ` - Nastaví `decisionBasis` pro nasazení na `rules`, takže agent Indexer bude při rozhodování o indexování tohoto nasazení používat pravidla indexování. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. 
-- `graph indexer action queue allocate <deployment-id> <allocation-amount>` - Akce přidělení fronty
+- `graph indexer action queue allocate <deployment-id> <allocation-amount>` - Queue allocation action

-- `graph indexer action queue reallocate <deployment-id> <allocation-id> <allocation-amount>` - Akce přerozdělení fronty
+- `graph indexer action queue reallocate <deployment-id> <allocation-id> <allocation-amount>` - Queue reallocate action

-- `graph indexer action queue unallocate <deployment-id> <allocation-id>` - Akce odalokování fronty
+- `graph indexer action queue unallocate <deployment-id> <allocation-id>` - Queue unallocate action

-- `Akce indexátoru grafů zrušit [<action-id> ...]` - Zruší všechny akce ve frontě, pokud není id zadáno, jinak zruší pole id s mezerou jako oddělovačem
+- `graph indexer actions cancel [<action-id> ...]` - Cancel all actions in the queue if no id is specified; otherwise, cancel the space-separated list of action ids

-- `schvalovat akce indexátoru grafů [<action-id> ...]` - Schválení více akcí k provedení
+- `graph indexer actions approve [<action-id> ...]` - Approve multiple actions for execution

-- `akce indexátoru grafu provést schválit` - Vynutí, aby pracovník okamžitě provedl schválené akce
+- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately

-Všechny příkazy, které zobrazují pravidla na výstupu, mohou pomocí argumentu `-output` volit mezi podporovanými výstupními formáty (`table`, `yaml` a `json`).
+All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument.

-#### Pravidla indexování
+#### Indexing rules

-Pravidla indexování lze použít buď jako globální výchozí, nebo pro konkrétní nasazení podgrafů pomocí jejich ID. Pole `nasazení` a `podklad pro rozhodování` jsou povinná, zatímco všechna ostatní pole jsou nepovinná. Pokud má indexovací pravidlo `pravidla` jako `základnu rozhodování`, pak agent Indexer porovná nenulové prahové hodnoty tohoto pravidla s hodnotami získanými ze sítě pro příslušné nasazení. Pokud má dílčí nasazení grafu hodnoty vyšší (nebo nižší) než některá z prahových hodnot, bude vybráno pro indexaci.
+Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing.

-Pokud má například globální pravidlo `minStake` hodnotu **5** (GRT), bude indexováno každé nasazení podgrafu, kterému je přiděleno více než 5 (GRT) podílů. Mezi prahová pravidla patří `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake` a `minAverageQueryFees`.
+For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`.
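
To make the threshold example concrete, the following is a minimal sketch of applying the 5 GRT `minStake` case as a global default. It only uses the `rules` commands documented above; the management endpoint and the threshold value are illustrative.

```sh
# Connect to the Indexer management API (endpoint is illustrative).
graph indexer connect http://localhost:18000

# Apply the example threshold from above as a global default: any
# deployment with more than 5 GRT of allocated stake will be indexed.
graph indexer rules set global decisionBasis rules minStake 5

# Confirm the global defaults the agent will apply.
graph indexer rules get global
```
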
-Datový model: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -Příklad použití indexovacího pravidla: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### Fronta akcí CLI +#### Actions queue CLI -Indexer-cli poskytuje modul `actions` pro ruční práci s frontou akcí. K interakci s frontou akcí používá **Graphql API**, které je hostováno serverem pro správu indexeru. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -Pracovník pro provádění akcí převezme položky z fronty k provedení pouze tehdy, pokud mají `ActionStatus = approved`. V doporučené cestě jsou akce přidány do fronty se stavem ActionStatus = queued, takže pak musí být schváleny, aby mohly být provedeny v řetězci. Obecný průběh bude vypadat takto: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Akce přidaná do fronty nástrojem optimalizace třetí strany nebo uživatelem indexer-cli -- Indexer může pomocí `indexer-cli` zobrazit všechny akce ve frontě -- Indexer (nebo jiný software) může akce ve frontě schválit nebo zrušit pomocí příkazu `indexer-cli`. Příkazy approve a cancel přijímají jako vstup pole id akcí. -- Pracovník provádějící operace pravidelně kontroluje frontu schválených akcí. Vezme z fronty `schválené` akce, pokusí se je provést a aktualizuje hodnoty v db v závislosti na stavu provedení na `úspěšné` nebo `neúspěšné`. -- Pokud je akce úspěšná, pracovník zajistí, že je přítomno pravidlo indexování, které agentovi říká, jak má alokaci dále spravovat, což je užitečné při provádění ručních akcí, když je agent v režimu `auto` nebo `oversight`. -- Indexer může sledovat frontu akcí a zobrazit historii jejich provádění a v případě potřeby znovu schválit a aktualizovat položky akcí, pokud se nepodařilo je provést. Fronta akcí poskytuje historii všech akcí zařazených do fronty a provedených. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
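
Because the queue lives behind the management server's GraphQL API, it can also be inspected without the CLI. The following is a hypothetical sketch of such a query; the endpoint, path, and field names are assumptions based on the `ActionInput` data model shown below, not a documented contract.

```sh
# Hypothetical direct query against the indexer management GraphQL API;
# the port, path, and selected fields are assumptions for illustration.
curl -s http://localhost:18000/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ actions(filter: { status: queued }) { id type deploymentID status } }"}'
```
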
-Datový model: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Příklad použití ze zdroje: +Example usage from source: ```bash graph indexer actions get all @@ -677,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Všimněte si, že podporované typy akcí pro správu přidělování mají různé vstupní požadavky: +Note that supported action types for allocation management have different input requirements: -- `Allocate` - přidělí podíl konkrétnímu nasazení podgrafu +- `Allocate` - allocate stake to a specific subgraph deployment - - požadované parametry akce: + - required action params: - deploymentID - - částka + - amount -- `Unallocate` - uzavře alokaci, čímž uvolní podíl k přerozdělení jinam +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - požadované parametry akce: + - required action params: - allocationID - deploymentID - - volitelné parametry akce: + - optional action params: - poi - - síla (vynutí pomocí poskytnutého POI, i když neodpovídá tomu, co poskytuje uzel graf) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Přerozdělit` – atomicky uzavřít alokaci a otevřít novou alokaci pro stejné nasazení podgrafu +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - požadované parametry akce: + - required action params: - allocationID - deploymentID - - částka - - volitelné parametry akce: + - amount + - optional action params: - poi - - síla (vynutí pomocí poskytnutého POI, i když neodpovídá tomu, co poskytuje uzel graf) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Nákladové modely +#### Cost models -Modely nákladů poskytují dynamické ceny pro dotazy na základě atributů trhu a dotazu. Služba Indexer sdílí s gateway model nákladů pro každý pods podgraf, na jehož dotazy chtějí odpovídat. Gatewaye pak na základě modelu nákladů rozhodují o výběru indexátoru pro každý dotaz a vyjednávají platby s vybranými indexátory. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Jazyk Agora poskytuje flexibilní formát pro deklarování nákladových modelů pro dotazy. Cenový model Agora je posloupnost příkazů, které se provádějí v pořadí pro každý dotaz nejvyšší úrovně v dotazu GraphQL. Pro každý dotaz nejvyšší úrovně určuje cenu tohoto dotazu první příkaz, který mu odpovídá. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -Příkaz se skládá z predikátu, který se používá pro porovnávání dotazů GraphQL, a nákladového výrazu, který po vyhodnocení vypisuje náklady v desetinném GRT. Hodnoty na pozici pojmenovaného argumentu dotazu mohou být zachyceny v predikátu a použity ve výrazu. Globály lze také nastavit a nahradit jimi zástupné znaky ve výrazu. 
+A statement consists of a predicate, which is used for matching GraphQL queries, and a cost expression which, when evaluated, outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression.

-Příklad nákladového modelu:
+Example cost model:

```
# This statement captures the skip value,
# uses a boolean expression in the predicate to match specific queries that use `skip`
# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global
query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD;
default => 0.1 * $SYSTEM_LOAD;
```

-Příklad výpočtu nákladů na dotaz pomocí výše uvedeného modelu:
+Example query costing using the above model:

-| Dotaz | Cena |
+| Query | Price |
| ---------------------------------------------------------------------------- | ------- |
| { pairs(skip: 5000) { id } } | 0.5 GRT |
| { tokens { symbol } } | 0.1 GRT |
| { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT |

-#### Použití nákladového modelu
+#### Applying the cost model

-Nákladové modely se používají prostřednictvím Indexer CLI, které je předává API pro správu indexátoru agenta Indexer k uložení do databáze. Služba Indexer je pak vyzvedne a nákladové modely doručí branám, kdykoli o ně požádají.
+Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them.

```sh
indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }'
indexer cost set model my_model.agora
```

-## Interakce se sítí
+## Interacting with the network

-### Podíl na protokolu
+### Stake in the protocol

The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions.

> Note: For the purposes of these instructions, Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools).

-Jakmile indexer zakládá GRT v protokolu, lze spustit komponenty [Indexer](/indexing/overview/#indexer-components) a zahájit jejich interakci se sítí.
+Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network.

-#### Schválení žetonů
+#### Approve tokens

-1. Otevření aplikace [Remix](https://remix.ethereum.org/) v prohlížeči
+1. Open the [Remix app](https://remix.ethereum.org/) in a browser

-2. V `Průzkumníku souborů` vytvořte soubor s názvem **GraphToken.abi** s [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json).
+2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json).

3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface.

-4. V části prostředí vyberte možnost `Injected Web3` a v části `Account` vyberte adresu indexeru.
+4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

-5.
Nastavení adresy smlouvy GraphToken - Vložte adresu smlouvy GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) vedle pole `At Address` a klikněte na tlačítko `At address` pro použití.
+5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply.

-6. Voláním funkce `approve(zadavatel, částka)` schválíte smlouvu o sázce. Do pole `spender` vyplňte adresu smlouvy o sázce (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) a do pole `amount` tokeny, které chcete vsadit (ve wei).
+6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei).

-#### Vkladové žetony
+#### Stake tokens

-1. Otevření aplikace [Remix](https://remix.ethereum.org/) v prohlížeči
+1. Open the [Remix app](https://remix.ethereum.org/) in a browser

-2. V `Průzkumníku souborů` vytvořte soubor s názvem **Staking.abi** s ABI staking.
+2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI.

3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface.

-4. V části prostředí vyberte možnost `Injected Web3` a v části `Account` vyberte adresu indexeru.
+4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

-5. Nastavení adresy smlouvy o sázce - Vložte adresu smlouvy o sázce (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) vedle pole `At address` a klikněte na tlačítko `At address` pro použití.
+5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply.

-6. Voláním `stake()` vložíte GRT do protokolu.
+6. Call `stake()` to stake GRT in the protocol.

-7. (Nepovinné) Indexátoři mohou schválit jinou adresu jako provozovatele své infrastruktury indexátorů, aby se oddělily klíče, které kontrolují finanční prostředky, od klíčů, které provádějí každodenní činnosti, jako je přidělování v podgrafech a obsluha (placených) dotazů. Pro nastavení operátora zavolejte `setOperator()` s adresou operátora.
+7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day-to-day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator, call `setOperator()` with the operator address.

-8. (Nepovinné) Za účelem řízení rozdělování odměn a strategického přilákání delegátů mohou indexátory aktualizovat své parametry delegování aktualizací indexingRewardCut (díly na milion), queryFeeCut (díly na milion) a cooldownBlocks (počet bloků). Za tímto účelem zavolejte `setDelegationParameters()`. Následující příklad nastavuje queryFeeCut tak, aby 95 % odměn za dotaz bylo rozděleno indexátoru a 5 % delegátům, indexingRewardCutto rozděluje 60 % odměn za indexování indexátoru a 40 % delegátům, a nastavuje `obdobícooldownBlocks` na 500 bloků.
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

To set the delegation parameters using Graph Explorer interface, follow these st

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### Životnost přídělu
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

-- **Aktivní** – Jakmile je alokace vytvořena v řetězci ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/ contract/staking/Staking.sol#L316)) je považován za **aktivní**. Část vlastního a/nebo delegovaného podílu indexeru je přidělena na nasazení podgrafu, což jim umožňuje nárokovat si odměny za indexování a obsluhovat dotazy pro toto nasazení podgrafu. Agent indexeru spravuje vytváření alokací na základě pravidel indexeru.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-Indexerům se doporučuje využívat funkce synchronizace mimo řetězec k synchronizaci nasazení subgrafů do hlavy řetězce před vytvořením alokace v řetězci. Tato funkce je užitečná zejména pro subgrafy, jejichž synchronizace může trvat déle než 28 epoch nebo u nichž existuje určitá pravděpodobnost nedeterministického selhání.
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing non-deterministically.
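
Note that the delegation parameters are expressed in parts per million, so `950000` corresponds to 95% and `600000` to 60%. The sketch below ties the offchain syncing recommendation to the `rules` commands documented earlier; the deployment ID is only an example.

```sh
# Sync the deployment offchain first, without opening an onchain allocation.
graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK

# Once it has reached chainhead, let the agent open the onchain allocation.
graph indexer rules start QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK
```
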
From c9efc25e74be16fce3a56030a3c86904a637105f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:38 -0500 Subject: [PATCH 0074/1534] New translations overview.mdx (German) --- website/src/pages/de/indexing/overview.mdx | 124 ++++++++++----------- 1 file changed, 62 insertions(+), 62 deletions(-) diff --git a/website/src/pages/de/indexing/overview.mdx b/website/src/pages/de/indexing/overview.mdx index 3c76d00f6cc2..5a0bc16080c8 100644 --- a/website/src/pages/de/indexing/overview.mdx +++ b/website/src/pages/de/indexing/overview.mdx @@ -1,8 +1,9 @@ --- -title: Indexing +title: Indizierung Überblick +sidebarTitle: Überblick --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. +Indexer sind Knotenbetreiber im Graph Network, die Graph Tokens (GRT) einsetzen, um Indizierungs- und Abfrageverarbeitungsdienste anzubieten. Indexer verdienen Abfragegebühren und Indexing Rewards für ihre Dienste. Sie verdienen auch Abfragegebühren, die gemäß einer exponentiellen Rabattfunktion zurückerstattet werden. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -59,8 +60,7 @@ query indexerAllocations { Use Etherscan to call `getRewards()`: - Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* To call `getRewards()`: +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - Enter the **allocationID** in the input. - Click the **Query** button. @@ -110,12 +110,12 @@ Indexers may differentiate themselves by applying advanced techniques for making - **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. - **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Konfiguration | Postgres
(CPUs) | Postgres
(Speicher in GB) | Postgres
(Festplatte in TB) | VMs
(CPUs) | VMs
(Speicher in GB) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Klein | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Mittel | 16 | 64 | 2 | 32 | 64 | -| Groß | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -123,7 +123,7 @@ Indexers may differentiate themselves by applying advanced techniques for making - **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## Infrastruktur +## Infrastructure At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. @@ -135,7 +135,7 @@ At the center of an Indexer's infrastructure is the Graph Node which monitors th - **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -145,37 +145,37 @@ Note: To support agile scaling, it is recommended that query and indexing concer > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. -#### Der Graph-Knoten +#### Graph Node -| Port | Zweck | Routen | CLI-Argument | Umgebungsvariable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP-Server
(für Subgraf-Abfragen) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(für Subgraf-Abonnements) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(zum Verwalten von Deployments) | / | --admin-port | - | -| 8030 | Subgraf-Indizierungsstatus-API | /graphql | --index-node-port | - | -| 8040 | Prometheus-Metriken | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Zweck | Routen | CLI-Argument | Umgebungsvariable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL-HTTP-Server
(für kostenpflichtige Subgraf-Abfragen) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus-Metriken | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| Port | Zweck | Routen | CLI-Argument | Umgebungsvariable | -| ---- | ----------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer-Verwaltungs-API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | ### Setup server infrastructure using Terraform on Google Cloud > Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Installieren Sie die Voraussetzungen +#### Install prerequisites -- Google Cloud-SDK -- Kubectl-Befehlszeilentool +- Google Cloud SDK +- Kubectl command line tool - Terraform #### Create a Google Cloud Project @@ -188,7 +188,7 @@ Note: To support agile scaling, it is recommended that query and indexing concer cd terraform ``` -- Authentifizieren Sie sich bei Google Cloud und erstellen Sie ein neues Projekt. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Aktivieren Sie die Abrechnung für das neue Projekt auf der Abrechnungsseite der Google Cloud Console. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Erstellen Sie eine Google Cloud-Konfiguration. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Aktivieren Sie die erforderlichen Google Cloud-APIs. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -243,7 +243,7 @@ gcloud compute addresses create google-managed-services-default \ --purpose=VPC_PEERING \ --network default \ --global \ - --description 'IP Range for peer Networks.' + --description 'IP Range for peer networks.' gcloud services vpc-peerings connect \ --network=default \ --ranges=google-managed-services-default @@ -256,11 +256,11 @@ indexer= cat > terraform.tfvars < **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). -#### Indexer-Agent +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Indexer-Service +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -514,7 +514,7 @@ graph-indexer-service start \ | pino-pretty ``` -#### Indexer-CLI +#### Indexer CLI The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. @@ -615,7 +615,7 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. 
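
As a minimal sketch of working with this module remotely, the commands below assume the agent runs in Kubernetes with its management port forwarded, mirroring the usage example earlier; the pod name and the `--status` filter value are illustrative.

```sh
# Forward the agent's management port (pod name is illustrative).
kubectl port-forward pod/indexer-agent-0 8000:8000 &

# Connect the CLI and list the currently queued actions.
graph indexer connect http://localhost:8000
graph indexer actions get all --status queued
```
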
-The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: - Action added to the queue by the 3rd party optimizer tool or indexer-cli user - Indexer can use the `indexer-cli` to view all queued actions @@ -704,7 +704,7 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Kostenmodelle +#### Cost models Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. @@ -729,13 +729,13 @@ default => 0.1 * $SYSTEM_LOAD; Example query costing using the above model: -| Abfrage | Preis | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Anwendung des Kostenmodells +#### Applying the cost model Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. @@ -744,7 +744,7 @@ indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interaktion mit dem Netzwerk +## Interacting with the network ### Stake in the protocol @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. 
- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From ec36430f5209c057d81d3d4c65728ee4ffbd3041 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:39 -0500 Subject: [PATCH 0075/1534] New translations overview.mdx (Italian) --- website/src/pages/it/indexing/overview.mdx | 408 ++++++++++----------- 1 file changed, 204 insertions(+), 204 deletions(-) diff --git a/website/src/pages/it/indexing/overview.mdx b/website/src/pages/it/indexing/overview.mdx index baaac4ea6609..a0c9a3b20227 100644 --- a/website/src/pages/it/indexing/overview.mdx +++ b/website/src/pages/it/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indicizzazione +title: Indexing Overview +sidebarTitle: Panoramica --- Gli Indexer sono operatori di nodi di The Graph Network che fanno staking di Graph Token (GRT) per fornire servizi di indicizzazione e di elaborazione delle query. Gli Indexer guadagnano tariffe di query e ricompense di indicizzazione per i loro servizi. Guadagnano anche tariffe di query che vengono rimborsate in base a una funzione di rimborso esponenziale. @@ -10,35 +11,35 @@ Gli Indexer selezionano i subgraph da indicizzare in base al segnale di curation ## FAQ -### Qual è lo stake minimo richiesto per essere un Indexer sulla rete? +### What is the minimum stake required to be an Indexer on the network? -Lo stake minimo per un Indexer è attualmente fissato a 100K GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### Quali sono le fonti di guadagno di un Indexer? +### What are the revenue streams for an Indexer? -**Sconti sulle tariffe di query** - Pagamenti per servire le query sulla rete. Questi pagamenti sono mediati da canali di stato tra un Indexer e un gateway. Ogni richiesta di query da parte di un gateway contiene un pagamento e la risposta corrispondente una prova della validità del risultato della query. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Ricompense di indicizzazione** - Generate tramite un'inflazione annuale del 3% a livello di protocollo, le ricompense di indicizzazione sono distribuite agli Indexer che indicizzano le distribuzioni di subgraph per la rete. 
+**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### Come vengono distribuite le ricompense di indicizzazione? +### How are indexing rewards distributed? -Le ricompense di indicizzazione provengono dall'inflazione del protocollo, impostata al 3% di emissione annuale. Vengono distribuite tra i subgraph in base alla proporzione di tutti i segnali di curation su ciascuno di essi, quindi distribuite proporzionalmente agli Indexer in base allo stake di partecipazione assegnato a quel subgraph. **Un'allocation deve essere chiusa con una prova valida di indicizzazione (POI) che soddisfi gli standard stabiliti dalla carta dell'arbitrato per poter beneficiare delle ricompense.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### Che cos'è una prova di indicizzazione (POI)? +### What is a proof of indexing (POI)? -I POI sono utilizzati nella rete per verificare che un Indexer stia indicizzando i subgraph su cui ha effettuato l'allocazione. Un POI per il primo blocco dell'epoca corrente deve essere presentato alla chiusura di un'allocazione affinché questa possa beneficiare delle ricompense di indicizzazione. Un POI per un blocco è un insieme di tutte le transazioni dell'entity store per una specifica distribuzione di subgraph fino a quel blocco incluso. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### Quando vengono distribuite le ricompense di indicizzazione? +### When are indexing rewards distributed? -Le allocazioni accumulano continuamente ricompense mentre sono attive e vengono allocate entro 28 epoche. Le ricompense vengono raccolte dagli Indexer e distribuite quando le loro allocazioni vengono chiuse. Ciò avviene manualmente, ogni volta che l'Indexer vuole chiuderle forzatamente, oppure dopo 28 epoche un Delegator può chiudere l'allocazione per l'Indexer, ma questo non comporta ricompense. 28 epoche è la durata massima dell'allocazione (al momento, un'epoca dura circa 24 ore). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. 
That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### È possibile monitorare le ricompense di indicizzazione in sospeso? +### Can pending indexing rewards be monitored? The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -Molte delle dashboard create dalla comunità includono i valori delle ricompense in sospeso, che possono essere facilmente controllate manualmente seguendo questi passaggi: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: @@ -56,129 +57,128 @@ query indexerAllocations { } ``` -Utilizzare Etherscan per chiamare `getRewards()`: +Use Etherscan to call `getRewards()`: -- Andare su [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* Per chiamare `getRewards()`: +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - - Inserire l'**allocationID** nell'input. - - Fare clic sul pulsante **Query**. + - Enter the **allocationID** in the input. + - Click the **Query** button. -### Cosa sono le controversie e dove posso vederle? +### What are disputes and where can I view them? -Le query e le allocazioni dell'Indexer possono essere contestate su The Graph durante il periodo di contestazione. Il periodo di contestazione varia a seconda del tipo di contestazione. Le query/attestazioni hanno una finestra di contestazione di 7 epoche, mentre le allocazioni 56 epoche. Una volta trascorsi questi periodi, non è più possibile aprire controversie né contro le allocation né contro le query. Quando viene aperta una controversia, i Fisherman devono versare un deposito minimo di 10.000 GRT, che rimarrà bloccato fino a quando la controversia non sarà conclusa e sarà stata data una risoluzione. I Fisherman sono tutti i partecipanti alla rete che aprono controversie. +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. -Le controversie hanno **tre** esiti possibili, così come il deposito dei Fishermen. +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. -- Se la controversia viene respinta, il GRT depositato dai Fishermen verrà bruciato e l'Indexer contestato non verrà tagliato. 
-- Se la controversia viene risolta con un pareggio, il deposito dei Fishermen verrà restituito e l'Indexer contestato non verrà tagliato. -- Se la controversia viene accettata, il GRT depositato dai Fishermen verrà restituito, l'Indexer contestato verrà tagliato e i Fishermen guadagneranno il 50% dei GRT tagliati. +- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -Le controversie possono essere visualizzate nell'interfaccia utente nella pagina del profilo di un Indexer, sotto la scheda `Disputes`. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -### Cosa sono gli sconti sulle tariffe di query e quando vengono distribuiti? +### What are query fee rebates and when are they distributed? -Le tariffe di query sono raccolte dal gateway e distribuite agli Indexer in base alla funzione di sconto esponenziale (vedi GIP [qui](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). La funzione di sconto esponenziale è proposta come un modo per garantire che gli Indexer ottengano il miglior risultato servendo fedelmente le query. Funziona incentivando gli Indexer ad allocare una grande quantità di stake (che può essere tagliato in caso di errore quando si serve una query) rispetto all'ammontare delle tariffe di query che possono raccogliere. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Una volta chiusa un'allocation, gli sconti possono essere richiesti dall'Indexer. Al momento della richiesta, gli sconti sulle tariffe di query vengono distribuiti all'Indexer e ai suoi Delegator in base alla riduzione delle tariffe di query e alla funzione di sconto esponenziale. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### Che cos'è la query fee cut e la indexing reward cut? +### What is query fee cut and indexing reward cut? -I valori di `queryFeeCut` e `indexingRewardCut` sono parametri di delega che l'Indexer può impostare insieme ai cooldownBlocks per controllare la distribuzione dei GRT tra l'Indexer i suoi Delegator. Per le istruzioni sull'impostazione dei parametri di delega, si vedano gli ultimi passi di [Staking nel Protocollo](/indexing/overview/#stake-in-the-protocol). +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. 
See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - la % delle riduzioni delle tariffe di query che verrà distribuita all'Indexer. Se questa opzione è impostata al 95%, l'Indexer riceverà il 95% delle tariffe di query guadagnate alla chiusura di un'allocazione, mentre il restante 5% andrà ai Delegator. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - la % delle ricompense di indicizzazione che verrà distribuita all'Indexer. Se è impostata al 95%, l'Indexer riceverà il 95% delle ricompense di indicizzazione quando viene chiusa un'allocazione e i Delegator si divideranno il restante 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### Come fanno gli Indexer a sapere quali subgraph indicizzare? +### How do Indexers know which subgraphs to index? -Gli Indexer possono differenziarsi applicando tecniche avanzate per prendere decisioni sull'indicizzazione dei subgraph, ma per dare un'idea generale discuteremo diverse metriche chiave utilizzate per valutare i subgraph nella rete: +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -- **Segnale di curation** - La percentuale del segnale di curation della rete applicato a un particolare subgraph è un buon indicatore dell'interesse per quel subgraph, soprattutto durante la fase di bootstrap, quando il volume delle query è in aumento. +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Tariffe di query raccolte** - I dati storici relativi al volume delle tariffe di query raccolte per uno specifico subgraph sono un buon indicatore della domanda futura. +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **Importo sullo staking** - Il monitoraggio del comportamento degli altri Indexer o l'esame delle proporzioni dello stake totale allocato a specifici subgraph può consentire a un Indexer di monitorare il lato dell'offerta per le query sui subgraph, per identificare i subgraph in cui la rete mostra fiducia o i subgraph che potrebbero avere bisogno di maggiore offerta. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **Subgraph senza ricompense di indicizzazione** - Alcuni subgraph non generano ricompense per l'indicizzazione principalmente perché utilizzano funzioni non supportate come IPFS o perché stanno facendo query su un'altra rete al di fuori della mainnet. 
Se un subgraph non genera ricompense di indicizzazione, viene visualizzato un messaggio. +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -### Quali sono i requisiti hardware? +### What are the hardware requirements? -- **Piccolo** - Sufficiente per iniziare a indicizzare diversi subgraph, probabilmente dovrà essere ampliato. -- **Standard** - Impostazione predefinita, è quella usata nei manifesti di distribuzione di esempio di k8s/terraform. -- **Medio** - Indexer di produzione che supporta 100 subgraph e 200-500 richieste al secondo. -- **Grande** - È pronto a indicizzare tutti i subgraph attualmente utilizzati e a servire le richieste per il relativo traffico. +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memoria in GBs) | Postgres
(disco in TBs) | VMs
(CPUs) | VMs
(memoria in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Piccolo | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medio | 16 | 64 | 2 | 32 | 64 | -| Grande | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -### Quali sono le precauzioni di base per la sicurezza che un Indexer dovrebbe adottare? +### What are some basic security precautions an Indexer should take? -- **Operator wallet** - La creazione di un operator wallet è una precauzione importante perché consente all'Indexer di mantenere una separazione tra le chiavi che controllano il stake e quelle che controllano le operazioni quotidiane. Vedere[ Stake al protocollo](/indexing/overview/#stake-in-the-protocol) per le istruzioni. +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **Firewall** - Solo l'Indexer service deve essere esposto pubblicamente e occorre prestare particolare attenzione a bloccare le porte di amministrazione e l'accesso al database: l'endpoint JSON-RPC di the Graph Node (porta predefinita: 8030), l'endpoint API di gestione dell'Indexer (porta predefinita: 18000) e l'endpoint del database Postgres (porta predefinita: 5432) non devono essere esposti. +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## Infrastruttura +## Infrastructure -Al centro dell'infrastruttura di un Indexer c'è the Graph Node, che monitora le reti indicizzate, estrae e carica i dati secondo una definizione di subgraph e li serve come [GraphQL API](/about/#how-the-graph-works). The Graph Node deve essere collegato a un endpoint che espone i dati di ciascuna rete indicizzata, a un nodo IPFS per l'approvvigionamento dei dati, a un database PostgreSQL per il suo archivio e a componenti dell'Indexer che facilitano le interazioni con la rete. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **Database PostgreSQL** - È l'archivio principale del The Graph Node, dove vengono memorizzati i dati dei subgraph. Anche l'Indexer Service e l'Indexer Agent utilizzano il database per memorizzare i dati del canale di stato, i modelli di costo, le regole di indicizzazione e le azioni di allocation. +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Endpoint dei dati** - Per le reti compatibili con EVM, il Graph Node deve essere collegato a un endpoint che esponga un'API JSON-RPC compatibile con EVM. 
Questo può assumere la forma di un singolo client o può essere una configurazione più complessa che bilancia il carico su più client. È importante sapere che alcuni subgraph richiedono particolari funzionalità del client, come la modalità di archiviazione e/o l'API di tracciamento della parità.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple clients. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **Nodo IPFS (versione inferiore a 5)** - I metadati di distribuzione del subgraph sono memorizzati sulla rete IPFS. Il Graph Node accede principalmente al nodo IPFS durante la distribuzione del subgraph per recuperare il manifest del subgraph e tutti i file collegati. Gli Indexer di rete non hanno bisogno di ospitare il proprio nodo IPFS; un nodo IPFS per la rete è ospitato all'indirizzo https://ipfs.network.thegraph.com.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node; an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

-- **Indexer service** - Gestisce tutte le comunicazioni esterne necessarie con la rete. Condivide i modelli di costo e gli stati di indicizzazione, passa le richieste di query dai gateway a un Graph Node e gestisce i pagamenti delle query tramite canali di stato con il gateway.
+- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **Indexer agent** - Facilita le interazioni degli Indexer sulla chain, compresa la registrazione sulla rete, la gestione delle distribuzioni di subgraph ai Graph Node e la gestione delle allocazioni.
+- **Indexer agent** - Facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.

-- **Server di metriche Prometheus** - I componenti Graph Node e Indexer registrano le loro metriche sul server delle metriche.
+- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

-Nota: Per supportare una scalabilità agile, si consiglia di separare le attività di query e indicizzazione tra diversi gruppi di nodi: nodi di query e nodi di indicizzazione.
+Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.

-### Panoramica delle porte
+### Ports overview

-> **Importante**: Fate attenzione a esporre pubblicamente le porte - **le porte di amministrazione** devono essere sempre bloccate. Questo include gli endpoint JSON-RPC del Graph Node e quelli di gestione dell'Indexer, descritti di seguito.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.
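
One possible way to enforce this lockdown is sketched below using `ufw` on a Linux host; the rule set is illustrative and assumes the default ports listed in the tables that follow.

```sh
# Illustrative ufw lockdown: expose only the public query port of the
# Indexer service; Graph Node admin (8020/8030), indexer management
# (8000) and Postgres (5432) stay closed to the outside.
ufw default deny incoming
ufw allow ssh
ufw allow 7600/tcp   # Indexer service: paid subgraph queries
ufw enable
```
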
#### Graph Node -| Porta | Obiettivo | Routes | Argomento CLI | Variabile d'ambiente | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(per le query di subgraph) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(per le sottoscrizioni ai subgraph) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(per la gestione dei deployment) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Metriche di Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Servizio Indexer +#### Indexer Service -| Porta | Obiettivo | Routes | Argomento CLI | Variabile d'ambiente | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(per le query di subgraph a pagamento) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Metriche di Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| Porta | Obiettivo | Routes | Argomento CLI | Variabile d'ambiente | -| ----- | ----------------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | API di gestione degli Indexer | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Configurare l'infrastruttura server utilizzando Terraform su Google Cloud +### Setup server infrastructure using Terraform on Google Cloud -> Nota: gli Indexer possono utilizzare in alternativa AWS, Microsoft Azure o Alibaba. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Installare i prerequisiti +#### Install prerequisites - Google Cloud SDK - Kubectl command line tool - Terraform -#### Creare un progetto Google Cloud +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Nota: Per supportare una scalabilità agile, si consiglia di separare le attivit cd terraform ``` -- Autenticare con Google Cloud e creare un nuovo progetto. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Utilizzare la pagina di fatturazione di Google Cloud Console per abilitare la fatturazione del nuovo progetto. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Creare una configurazione di Google Cloud. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Abilitare le API di Google Cloud necessarie. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Creare un account di servizio. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Abilitare il peering tra il database e il cluster Kubernetes che verrà creato nella fase successiva. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,22 +249,22 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Creare un file di configurazione minima di terraform (aggiornare secondo necessità). +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < **NOTA**: Tutte le variabili di configurazione di runtime possono essere applicate come parametri al comando all'avvio o utilizzando variabili d'ambiente del formato `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). 
+> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). #### Indexer agent @@ -516,56 +516,56 @@ graph-indexer-service start \ #### Indexer CLI -L'Indexer CLI è un plugin per [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessibile nel terminale a `graph indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Gestione dell'Indexer tramite Indexer CLI +#### Indexer management using Indexer CLI -Lo strumento suggerito per interagire con l'**Indexer Management API** è il **Indexer CLI**, un'estensione del **Graph CLI**. L'Indexer agent ha bisogno di input da un Indexer per interagire autonomamente con la rete per conto dell'Indexer. I meccanismi per definire il comportamento dell' Indexer agent sono modalità di **gestione dell'allocazione** e **regole di indicizzazione**. In modalità automatica, un Indexer può utilizzare le **regole di indicizzazione** per applicare la propria strategia specifica di selezione dei subgraph da indicizzare e per i quali servire le query. Le regole sono gestite tramite un' GraphQL API servito dall' agent e noto come Indexer Management API. In modalità manuale, un Indexer può creare azioni di allocation usando **actions queue** e approvarli esplicitamente prima che vengano eseguiti. In modalità di supervisione, le **regole di indicizzazione** sono utilizzate per popolare le **actions queue** e richiedono anche un'approvazione esplicita per l'esecuzione. +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Utilizzo +#### Usage -Il **Indexer CLI** si connette all'Indexer Agent, in genere tramite port-forwarding, quindi non è necessario che la CLI venga eseguita sullo stesso server o cluster. Per aiutarvi a iniziare e per fornire un contesto, la CLI verrà descritta brevemente qui. +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `graph indexer connect ` - Connettersi all' Indexer management API. In genere la connessione al server viene aperta tramite il port forwarding, in modo che la CLI possa essere facilmente utilizzata in remoto. (Esempio: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. 
Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Ottenere una o più regole di indicizzazione utilizzando `all` come `` per ottenere tutte le regole, oppure `global` per ottenere i valori predefiniti globali. Un argomento aggiuntivo `--merged` può essere usato per specificare che le regole specifiche dell'implementazione vengono unite alla regola globale. Questo è il modo in cui vengono applicate nell' Indexer agent. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - Impostare una o più regole di indicizzazione. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Avviare l'indicizzazione di una distribuzione di subgraph, se disponibile, e impostare il suo valore di `decisionBasis` per `always`, quindi l' Indexer agent sceglierà sempre di indicizzarlo. Se la regola globale è impostata su sempre, tutti i subgraph disponibili sulla rete saranno indicizzati. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` - Interrompere l'indicizzazione di un'installazione e impostare il suo `decisionBasis` a mai, quindi salterà questa distribuzione quando deciderà le distribuzioni da indicizzare. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` — Impostare il `decisionBasis` per una distribuzione a `rules`, in modo che l' Indexer agent utilizzi le regole di indicizzazione per decidere se indicizzare questa distribuzione. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. 
-- `graph indexer action queue allocate ` - Azione di allocation della coda +- `graph indexer action queue allocate ` - Queue allocation action -- `graph indexer action queue reallocate ` - Azione di riallocazione della coda +- `graph indexer action queue reallocate ` - Queue reallocate action -- `graph indexer action queue unallocate ` - Azione di deallocazione della coda +- `graph indexer action queue unallocate ` - Queue unallocate action -- `graph indexer actions cancel [ ...]` - Annulla tutte le azioni nella coda se l'id non è specificato, altrimenti annulla l'array di id con lo spazio come separatore +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator -- `graph indexer actions approve [ ...]` - Approvare più azioni multiple da eseguire +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution -- `graph indexer actions execute approve` - Forzare il lavoratore a eseguire immediatamente le azioni approvate +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -Tutti i comandi che visualizzano le regole nell'output possono scegliere tra i formati di output supportati (`table`, `yaml`, and `json`) utilizzando l'argomento `-output`. +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### Regole di indicizzazione +#### Indexing rules -Le regole di indicizzazione possono essere applicate come valori predefiniti globali o per specifiche distribuzioni di subgraph utilizzando i loro ID. I campi `deployment` e `decisionBasis` sono obbligatori, mentre tutti gli altri campi sono facoltativi. Quando una regola di indicizzazione ha `rules` come `decisionBasis`, allora l' Indexer agent confronterà i valori di soglia non nulli di quella regola con i valori recuperati dalla rete per la distribuzione corrispondente. Se la distribuzione del subgraph ha valori superiori (o inferiori) a una qualsiasi delle soglie, verrà scelta per l'indicizzazione. +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -Ad esempio, se la regola globale ha `minStake` di **5** (GRT) qualsiasi schieramento di subgraph che abbia più di 5 (GRT) di stake assegnati ad esso sarà indicizzato. Le regole di soglia includono `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, e `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
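As a hedged sketch of how such a threshold strategy could be expressed with the CLI commands above (the rule values are illustrative, not recommendations):

```sh
# Mirror the minStake example: index any deployment with more than 5 GRT of stake
# allocated to it, letting indexing rules drive the decision.
graph indexer rules set global minStake 5 decisionBasis rules

# Inspect the global rule in its merged form, the way the Indexer agent applies it.
graph indexer rules get global --merged
```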
-Modello di dati: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -Esempio di utilizzo della regola di indicizzazione: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### CLI della coda di azioni +#### Actions queue CLI -L'indexer-cli fornisce un modulo di `actions` per lavorare manualmente con la coda di azioni. Utilizza il **Graphql API** ospitato dal server di gestione dell'Indexer per interagire con la coda delle azioni. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -L'operaio per l'esecuzione dell'azione prenderà gli elementi dalla coda per eseguirli solo se hanno `ActionStatus = approved`. Nel percorso consigliato le azioni vengono aggiunte alla coda con ActionStatus = queued, quindi devono essere approvate per essere eseguite sulla catena. Il flusso generale sarà simile a: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Azione aggiunta alla coda dallo strumento ottimizzatore di terze parti o dall'utente di indexer-cli -- L' Indexer può utilizzare l'`indexer-cli` per visualizzare tutte le azioni in coda -- L'Indexer (o un altro software) può approvare o annullare le azioni nella coda utilizzando l'`indexer-cli`. I comandi approva e annulla prendono in input un array di id di azioni. -- L'operaio di esecuzione controlla regolarmente la coda per le azioni approvate. Prenderà le azioni `approved` dalla coda, tenterà di eseguirle e aggiornerà i valori nel db a seconda dello stato di esecuzione con `success` oppure `failed`. -- Se un'azione ha successo, l'operaio si assicurerà che sia presente una regola di indicizzazione che indichi all'agente come gestire l'allocazione in futuro, utile quando si intraprendono azioni manuali mentre l'agente è in modalità `auto` oppure `oversight`. -- L'Indexer può monitorare la coda delle azioni per vedere la cronologia dell'esecuzione delle azioni e, se necessario, riapprovare e aggiornare le voci di azione se non sono state eseguite. La coda di azioni fornisce una cronologia di tutte le azioni accodate ed eseguite. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. 
+- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. -Modello di dati: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Esempio di utilizzo dalla sorgente: +Example usage from source: ```bash graph indexer actions get all @@ -677,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Si noti che i tipi di azione supportati per la gestione dell'allocazione hanno requisiti di input diversi: +Note that supported action types for allocation management have different input requirements: -- `Allocate` - allocare lo stake ad uno specifico deploy di subgraph +- `Allocate` - allocate stake to a specific subgraph deployment - - parametri d'azione richiesti: + - required action params: - deploymentID - amount -- `Unallocate` - chiudere l'allocazione, liberando lo stake da riallocare altrove +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - parametri d'azione richiesti: + - required action params: - allocationID - deploymentID - - parametri dell'azione facoltativi: + - optional action params: - poi - - force (forza l'uso del POI fornito anche se non corrisponde a quello fornito dal the graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - chiudere atomicamente l'allocazione e aprire una nuova allocazione per lo stesso deplloy del subgraph +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - parametri d'azione richiesti: + - required action params: - allocationID - deploymentID - amount - - parametri dell'azione facoltativi: + - optional action params: - poi - - force (forza l'uso del POI fornito anche se non corrisponde a quello fornito dal the graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Modelli di costo +#### Cost models -I modelli di costo forniscono prezzi dinamici per le query in base al mercato e agli attributi della query. L'Indexer service condivide con i gateway un modello di costo per ogni subgraph per il quale intende rispondere alle query. I gateway, a loro volta, utilizzano il modello di costo per prendere decisioni sulla selezione degli Indexer per ogni query e per negoziare il pagamento con gli Indexer scelti. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Il linguaggio Agora fornisce un formato flessibile per dichiarare i modelli di costo delle query. Un modello di prezzo Agora è una sequenza di istruzioni che vengono eseguite in ordine per ogni query di primo livello in una query GraphQL. Per ogni query di primo livello, la prima istruzione che vi corrisponde determina il prezzo per quella query. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. 
-Una dichiarazione è composta da un predicato, che viene usato per abbinare le query GraphQL, e da un'espressione di costo che, una volta valutata, produce un costo in GRT decimali. I valori nella posizione dell'argomento nominato di una query possono essere catturati nel predicato e usati nell'espressione. Si possono anche impostare dei globali e sostituirli ai segnaposto in un'espressione. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Esempio di modello di costo: +Example cost model: ``` # This statement captures the skip value, @@ -727,64 +727,64 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -Esempio di query di costo utilizzando il modello di cui sopra: +Example query costing using the above model: -| Query | Prezzo | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Applicazione del modello di costo +#### Applying the cost model -I modelli di costo vengono applicati tramite l'Indexer CLI, che li passa all'Indexer Management API dell' Indexer agent per la memorizzazione nel database. L'Indexer Service li preleva e serve i modelli di costo ai gateway ogni volta che li richiedono. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interazione con la rete +## Interacting with the network -### Staking al protocollo +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -Una volta che l'Indexer ha messo in staking i GRT nel protocollo, gli [Indexer components](/indexing/overview/#indexer-components) possono essere avviati e iniziare le loro interazioni con la rete. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Approvare i token +#### Approve tokens -1. Aprire il [Remix app](https://remix.ethereum.org/) nel browser +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. Nel `File Explorer` creare un file chiamato **GraphToken.abi** con il [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. 
In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. In Ambiente selezionare `Injected Web3` e nel `Account` selezionare l'indirizzo dell'Indexer. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Impostare l'indirizzo del contratto GraphToken - Incollare l'indirizzo del contratto GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) vicino a `At Address` e fare clic sul pulsante `At address` per applicare. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Chiamare la funzione `approve(spender, amount)` per approvare il contratto di staking. Inserire in `spender` l'indirizzo del contratto di Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) e `amount` con i token da fare staking (in wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Fare staking dei token +#### Stake tokens -1. Aprire il [Remix app](https://remix.ethereum.org/) nel browser +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. Nel `File Explorer` creare un file chiamato **Staking.abi** con log staking ABI. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. In Ambiente selezionare `Injected Web3` e nel `Account` selezionare l'indirizzo dell'Indexer. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Impostare l'indirizzo del contratto di Staking - Incollare l'indirizzo del contratto di Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) vicino a `At Address` e fare click sul pulsante `At address` per applicare. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. Chiamare `stake()` per fare staking di GRT sul protocollo. +6. Call `stake()` to stake GRT in the protocol. -7. (Opzionale) Gli Indexer possono approvare un altro indirizzo come operatore per la loro infrastruttura di indicizzazione, al fine di separare le chiavi che controllano i fondi da quelle che eseguono le azioni quotidiane, come l'allocazione sui subgraph e il servizio di query (a pagamento). Per impostare l'operatore chiamare `setOperator()` con l'indirizzo dell'operatore. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. -8. 
(Opzionale) Per controllare la distribuzione delle ricompense e attirare strategicamente i delegator, gli Indexer possono aggiornare i loro parametri di delega aggiornando i loro indexingRewardCut (parti per milione), queryFeeCut (parti per milione) e cooldownBlocks (numero di blocchi). Per farlo, chiamare `setDelegationParameters()`. L'esempio seguente imposta il queryFeeCut per distribuire il 95% degli sconti sulle query all'Indexer e il 5% ai Delegator, imposta l'indexingRewardCut per distribuire il 60% delle ricompense per l'indicizzazione all'Indexer e il 40% ai Delegator, e imposta il periodo di `thecooldownBlocks` a 500 blocks.
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

To set the delegation parameters using Graph Explorer interface, follow these steps:

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### La vita di un'allocazione
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-Si raccomanda agli Indexer di utilizzare la funzionalità di sincronizzazione offchain per sincronizzare le distribuzioni dei subgraph con chainhead prima di creare l'allocazione on-chain. Questa funzione è particolarmente utile per i subgraph che possono richiedere più di 28 epoche per la sincronizzazione o che hanno qualche possibilità di fallire in modo indeterminato. 
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 232d0d0213d599cf2bb9fe8985e1b0d97d40ea18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:40 -0500 Subject: [PATCH 0076/1534] New translations overview.mdx (Japanese) --- website/src/pages/ja/indexing/overview.mdx | 439 +++++++++++---------- 1 file changed, 220 insertions(+), 219 deletions(-) diff --git a/website/src/pages/ja/indexing/overview.mdx b/website/src/pages/ja/indexing/overview.mdx index 9d27e07afd66..ecca0c6bc210 100644 --- a/website/src/pages/ja/indexing/overview.mdx +++ b/website/src/pages/ja/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: インデキシング +title: Indexing Overview +sidebarTitle: 概要 --- インデクサーは、The Graph Network内のノードオペレーターであり、インデックス化とクエリ処理のサービスを提供するためにGraph Token(GRT)をステークします。インデクサーは、そのサービスに対するクエリ料金とインデックスリワードを獲得します。また、指数的なリベート関数に従ってリベートされるクエリ料金も獲得します。 @@ -8,43 +9,43 @@ title: インデキシング インデクサ − は、サブグラフのキュレーション・シグナルに基づいてインデックスを作成するサブグラフを選択し、キュレーターは、どのサブグラフが高品質で優先されるべきかを示すために GRT をステークします。 消費者(アプリケーションなど)は、インデクサーが自分のサブグラフに対するクエリを処理するパラメータを設定したり、クエリフィーの設定を行うこともできます。 -## よくある質問 +## FAQ -### ネットワーク上のインデクサーになるために必要な最低ステーク量はいくらですか? +### What is the minimum stake required to be an Indexer on the network? -インデクサーの最低ステーク量は、現在 100K GRT に設定されています。 +The minimum stake for an Indexer is currently set to 100K GRT. -### インデクサーの収入源は何ですか? +### What are the revenue streams for an Indexer? -**クエリフィー・リベート** - ネットワーク上でクエリを提供するための手数料です。 この手数料は、インデクサーとゲートウェイ間のステートチャネルを介して支払われます。 ゲートウェイからの各クエリリクエストには手数料が含まれ、対応するレスポンスにはクエリ結果の有効性の証明が含まれます。 +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**インデキシング報酬** - プロトコル全体のインフレーションにより生成される年率 3%のインデキシング報酬は、ネットワークのサブグラフ・デプロイメントのインデキシングを行うインデクサーに分配されます。 +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### インデキシングリワードはどのように配布されますか? +### How are indexing rewards distributed? -インデキシング報酬は、年間 3%の発行量に設定されているプロトコル・インフレから得られます。 報酬は、それぞれのサブグラフにおけるすべてのキュレーション・シグナルの割合に基づいてサブグラフに分配され、そのサブグラフに割り当てられたステークに基づいてインデクサーに分配されます。 **特典を受けるためには、仲裁憲章で定められた基準を満たす有効な POI(Proof of Indexing)で割り当てを終了する必要があります。** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). 
Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### POI(proof of indexing)とは何ですか? +### What is a proof of indexing (POI)? -POI は、インデクサーが割り当てられたサブグラフにインデックスを作成していることを確認するためにネットワークで使用されます。 現在のエポックの最初のブロックに対する POI は、割り当てを終了する際に提出しなければ、その割り当てはインデックス報酬の対象となりません。 あるブロックの POI は、そのブロックまでの特定のサブグラフのデプロイに対するすべてのエンティティストアのトランザクションのダイジェストです。 +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### インデキシングリワードはいつ配布されますか? +### When are indexing rewards distributed? -割り当ては、それがアクティブである間、継続的に報酬を発生させます。 報酬はインデクサによって集められ、割り当てが終了するたびに分配されます。 これは、インデクサーが強制的に閉じようとしたときに手動で行うか、28 エポックの後にデリゲーターがインデクサーのために割り当てを終了することができますが、この場合は報酬がミントされません。 28 エポックは最大の割り当て期間です(現在、1 エポックは約 24 時間です) +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### 保留中のインデクサーの報酬は監視できますか? +### Can pending indexing rewards be monitored? The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -コミュニティが作成したダッシュボードの多くには保留中の報酬値が含まれており、次の手順に従って手動で簡単に確認できます。 +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { - indexer(id: "") { + indexer(id: "") { allocations { activeForIndexer { allocations { @@ -58,127 +59,126 @@ query indexerAllocations { Use Etherscan to call `getRewards()`: -- [報酬契約への Etherscan インターフェイス](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)に移動します。 - -* `getRewards()`を呼び出します +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - - 入力欄に**allocationID**を入力 - - **Query**ボタンをクリック + - Enter the **allocationID** in the input. + - Click the **Query** button. -### 争議(disputes)とは何で、どこで見ることができますか? +### What are disputes and where can I view them? -インデクサークエリとアロケーションは、期間中に The Graph 上で争議することができます。 争議期間は、争議の種類によって異なります。 クエリ/裁定には 7 エポックスの紛争窓口があり、割り当てには 56 エポックスがあります。 これらの期間が経過した後は、割り当てやクエリのいずれに対しても紛争を起こすことはできません。 紛争が開始されると、Fishermen は最低 10,000GRT のデポジットを要求され、このデポジットは紛争が最終的に解決されるまでロックされます。 フィッシャーマンとは、紛争を開始するネットワーク参加者のことです。 +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. 
Queries/attestations have a 7-epoch dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required from the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants that open disputes.

-論争には **3** の結果が考えられます。漁師の預金も同様です
+Disputes have **three** possible outcomes, as does the deposit of the Fishermen.

-- 争議が却下された場合、フィッシャーマンが預かった GRT はバーンされ、争議中のインデクサーはスラッシュされません。
-- 争議が引き分けた場合、フィッシャーマンのデポジットは返還され、争議中のインデクサーはスラッシュされることはありません。
-- 争議が受け入れられた場合、フィッシャーマンがデポジットした GRT は返却され、争議中のインデクサーはスラッシュされ、フィッシャーマンはスラッシュされた GRT の 50%を獲得します。
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed.
+- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT.

-紛争は、UI のインデクサーのプロファイルページの`紛争`タブで確認できます。
+Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab.

-### クエリフィーリベートとは何ですか、またいつ配布されますか?
+### What are query fee rebates and when are they distributed?

-クエリ料金はゲートウェイによって収集され、指数リベート関数に従ってインデクサーに分配されます (GIP[こちら](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)を参照)。 指数リベート関数は、インデクサーがクエリを忠実に処理することで確実に最良の結果を達成する方法として提案されています。 これは、インデクサーが収集する可能性のあるクエリ料金の額と比較して、大量のステーク (クエリの提供時にエラーが発生した場合に削減される可能性がある) を割り当てるようインデクサーに奨励することによって機能します。
+Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect.

-割り当てが閉じられると、リベートはインデクサーによって請求されることができるようになります。請求されると、クエリ料金のリベートは、クエリ料金のカットと指数的なリベート関数に基づいて、インデクサーとその委任者に分配されます。
+Once an allocation has been closed, the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function.

-### クエリフィーカットとインデキシングリワードカットとは?
+### What is query fee cut and indexing reward cut?

-`クエリフィーカット` と`インデキシングリワードカット` の値は、インデクサーが クールダウンブロックと共に設定できるデリゲーションパラメータで、インデクサーとそのデリゲーター間の GRT の分配を制御するためのものです。 デリゲーションパラメータの設定方法については、[Staking in the Protocol](/indexing/overview/#stake-in-the-protocol)の最後のステップを参照してください。
+The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters.

-- **queryFeeCut** - クエリ料金のリベートの%を示します。これが95%に設定されている場合、インデクサーは割り当てが閉じられた際に獲得するクエリ料金の95%を受け取り、残りの5%は委任者に支払われます。
+- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. 
If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - インデックスリワードの%を示します。これが95%に設定されている場合、インデクサーは割り当てが閉じられた際に獲得するインデックスリワードの95%を受け取り、残りの5%は委任者で分割されます。 +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### インデクサーはどのサブグラフにインデックスを付けるかをどう見分けるのですか? +### How do Indexers know which subgraphs to index? -インデクサーは、サブグラフのインデキシングの決定に高度な技術を適用することで差別化を図ることができますが、一般的な考え方として、ネットワーク内のサブグラフを評価するために使用されるいくつかの主要な指標について説明します。 +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -- **キュレーションシグナル** - 特定のサブグラフに適用されたネットワークキュレーションシグナルの割合は、そのサブグラフへの関心を示す指標となり、特にクエリのボリュームが増加しているブートストラップ段階では有効となります。 +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **コレクティド・クエリフィー** - 特定のサブグラフに対してコレクティド・クエリフィー量の履歴データは、将来的な需要に対する指標となります。 +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **ステーク量** - 他のインデクサーの行動を監視したり、特定のサブグラフに割り当てられた総ステーク量の割合を見ることで、インデクサーはサブグラフ・クエリの供給側を監視し、ネットワークが信頼を示しているサブグラフや、より多くの供給を必要としているサブグラフを特定することができます。 +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **インデックス報酬のないサブグラフ** - 一部のサブグラフは、主に IPFS などのサポートされていない機能を使用していたり、メインネット外の別のネットワークをクエリしていたりするため、インデックス報酬を生成しません。 インデクシング・リワードを生成していないサブグラフにはメッセージが表示されます。 +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -### 必要なハードウェアは何ですか? +### What are the hardware requirements? -- **Small** - いくつかのサブグラフのインデックス作成を開始するのに十分ですが、おそらく拡張が必要になります -- **Standard** - デフォルトのセットアップであり、k8s/terraform の展開マニフェストの例で使用されているものです -- **Medium** - 100 個のサブグラフと 1 秒あたり 200 ~ 500 のリクエストをサポートするプロダクションインデクサー -- **Large** - 現在使用されているすべてのサブグラフのインデックスを作成し、関連するトラフィックのリクエストに対応します +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -### インデクサーが取るべきセキュリティ対策は? +### What are some basic security precautions an Indexer should take? -- **オペレーター ウォレット** - オペレーター ウォレットを設定することは重要な予防措置です。これにより、インデクサーは、ステークを制御するキーと日々の操作を制御するキーとの間の分離を維持できるようになります。 - デイ オペレーション。手順については、[Stake in Protocol](/indexing/overview/#stake-in-the-protocol) を参照してください。 +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **ファイアウォール** - インデクサー サービスのみを公開する必要があり、管理ポートとデータベース アクセスのロックダウンに特に注意を払う必要があります: グラフ ノード JSON-RPC エンドポイント (デフォルト ポート: 8030)、インデクサー管理 API エンドポイント (既定のポート: 18000)、および Postgres データベース エンドポイント (既定のポート: 5432) は公開しないでください。 +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## インフラストラクチャ +## Infrastructure -インデクサーのインフラストラクチャの中心にあるのは、インデックス化されたネットワークを監視し、サブグラフ定義ごとにデータを抽出してロードし、[GraphQL API](/about/#how-the-graph-works) として提供するグラフ ノードです。グラフ ノードは、各インデックス付きネットワークからのデータを公開するエンドポイントに接続する必要があります。データを調達するための IPFS ノード。そのストア用の PostgreSQL データベース。ネットワークとのやり取りを容易にするインデクサー コンポーネント。 +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL データベース** - グラフノードのメインストアで、サブグラフのデータが格納されています。 また、インデクササービスとエージェントは、データベースを使用して、ステートチャネルデータ、コストモデル、およびインデクシングルールを保存します。 +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **データ エンドポイント** - EVM 互換ネットワークの場合、EVM 互換 JSON-RPC API を公開するエンドポイントにグラフ ノードを接続する必要があります。これは、単一のクライアントの形式をとる場合もあれば、複数の負荷を分散するより複雑なセットアップの場合もあります。特定のサブグラフには、アーカイブ モードやパリティ トレース API などの特定のクライアント機能が必要になることに注意してください。 +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. 
-- **IPFS ノード(バージョン 5 未満)** - サブグラフのデプロイメタデータは IPFS ネットワーク上に保存されます。 グラフノードは、サブグラフのデプロイ時に主に IPFS ノードにアクセスし、サブグラフマニフェストと全てのリンクファイルを取得します。 ネットワーク・インデクサーは独自の IPFS ノードをホストする必要はありません。 ネットワーク用の IPFS ノードは、https://ipfs.network.thegraph.com でホストされています。 +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. -- **Indexer service** - ネットワークとの必要な外部通信を全て処理します。 コストモデルとインデキシングのステータスを共有し、ゲートウェイからのクエリ要求をグラフノードに渡し、ゲートウェイとのステートチャンネルを介してクエリの支払いを管理します。 +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - ネットワークへの登録、グラフノードへのサブグラフのデプロイ管理、割り当ての管理など、チェーン上のインデクサーのインタラクションを容易にします。 +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **Prometheus metrics server** - Graph Node と Indexer コンポーネントは、自分のメトリクスをメトリクス・サーバーにログします。 +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -注: アジャイルなスケーリングをサポートするには、クエリとインデックス作成の懸念事項を異なるノード セット (クエリ ノードとインデックス ノード) に分けることをお勧めします。 +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -### ポートの概要 +### Ports overview -> **重要**: ポートを公開する場合は注意してください。**管理ポート**はロックしておく必要があります。これには、Graph Node JSON-RPC と、以下に詳述するインデクサー管理エンドポイントが含まれます。 +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### グラフノード -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Google Cloud で Terraform を使ってサーバーインフラを構築 +### Setup server infrastructure using Terraform on Google Cloud -> 注:インデクサーは、AWS、Microsoft Azure、Alibabaを代替的に使用することができます。 +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### インストールの前提条件 +#### Install prerequisites - Google Cloud SDK -- Kubectl コマンドラインツール +- Kubectl command line tool - Terraform -#### Google Cloud プロジェクトの作成 +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Use Etherscan to call `getRewards()`: cd terraform ``` -- Google Cloud で認証し、新しいプロジェクトを作成 +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Google Cloud Console の\[billing page\](課金ページ) を使用して、新しいプロジェクトの課金を有効にします。 +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Google Cloud の設定を作成します。 +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Google Cloud API の設定 +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- サービスアカウントを作成 +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- データベースと次のステップで作成する Kubernetes クラスター間のピアリングを有効化 +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -243,40 +243,41 @@ gcloud compute addresses create google-managed-services-default \ --purpose=VPC_PEERING \ --network default \ --global \ - --description 'IP Range for peer networks.' gcloud services vpc-peerings connect \ + --description 'IP Range for peer networks.' +gcloud services vpc-peerings connect \ --network=default \ --ranges=google-managed-services-default ``` -- Terraform 設定ファイルを作成(必要に応じて更新してください) +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < **注**:全てのランタイム設定変数は、起動時にコマンドのパラメーターとして適用するか、`COMPONENT_NAME_VARIABLE_NAME`(例:`INDEXER_AGENT_ETHEREUM`)という形式の環境変数を使用することができます。 +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). 
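For illustration, the two invocations sketched below should be equivalent per the note above. This is an assumption-laden sketch: the `--ethereum` flag name is inferred from the `INDEXER_AGENT_ETHEREUM` example, and the endpoint value is a placeholder.

```sh
# Configuration passed as a startup parameter (flag name inferred from the example above)
graph-indexer-agent start --ethereum http://localhost:8545

# The same configuration passed as an environment variable (COMPONENT_NAME_VARIABLE_NAME)
INDEXER_AGENT_ETHEREUM=http://localhost:8545 graph-indexer-agent start
```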
-#### インデクサーエージェント +#### Indexer agent ```sh graph-indexer-agent start \ @@ -487,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### インデクサーサービス +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -513,58 +514,58 @@ graph-indexer-service start \ | pino-pretty ``` -#### インデクサー CLI +#### Indexer CLI -インデクサー CLI は、[`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) アクセス可能なプラグインですターミナルの `graph indexer` で。 +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Indexer CLI によるインデクサー管理 +#### Indexer management using Indexer CLI -**Indexer Management API** と対話するための推奨ツールは **Indexer CLI** で、これは **Graph CLI** を拡張したものです。インデクサーエージェントは、インデクサーに代わって自律的にネットワークと対話するために、 インデクサからの入力を必要とします。インデクサーエージェントの動作を定義する仕組みは、 ** 割り当て管理**モードと ** インデックスルール** です。auto モードでは、Indexer は **indexing rules** を使って、インデックスやクエリを提供するサブグラフを選択するための特定の戦略を適用することができます。ルールは、エージェントが提供する GraphQL API を介して管理され、 Indexer Management API として知られています。手動モードでは、Indexer は **action queue** を使って割り当てアクションを作成し、実行される前に明示的に承認することができます。oversight モードでは、**indexing rules** を使って **action queue** を作成し、実行のために明示的な承認が必要です。 +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### 使い方 +#### Usage -**Indexer CLI**は、通常ポート・フォワーディングを介してインデクサー・エージェントに接続するため、CLI が同じサーバやクラスタ上で動作する必要はありません。 ここでは CLI について簡単に説明します。 +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `graph indexer connect ` - インデクサー管理 API に接続します。 通常、サーバーへの接続はポートフォワーディングによって開かれ、CLI をリモートで簡単に操作できるようになります。 (例:`kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph インデクサー ルール get [options] [<キー1> ...]` - `all` を `` として使用して 1 つ以上のインデックス作成ルールを取得し、すべてのルールを取得するか、`global< /code> グローバルなデフォルトを取得します。追加の引数 --merged` を使用して、展開固有のルールをグローバル ルールとマージすることを指定できます。これは、インデクサー エージェントでそれらがどのように適用されるかです。 +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. 
-- `graph indexer rules set [options] ...` - 1 つまたは複数のインデキシング規則を設定します。
+- `graph indexer rules set [options] ...` - Set one or more indexing rules.

-- `graph indexer rules start [options] ` - 利用可能な場合はサブグラフ配置のインデックス作成を開始し、`decisionBasis`を`always`に設定するので、インデクサー・エージェントは常にインデキシングを選択します。 グローバル ルールが always に設定されている場合、ネットワーク上のすべての利用可能なサブグラフがインデックス化されます。
+- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed.

-- `graph indexer rules stop [options] ` - 配置のインデックス作成を停止し、`decisionBasis`を never に設定することで、インデックスを作成する配置を決定する際にこの配置をスキップします。
+- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to `never`, so it will skip this deployment when deciding on deployments to index.

-- `graph indexer rules maybe [options] ` - 配置の`thedecisionBasis` を`rules`に設定し、インデクサーエージェントがインデキシングルールを使用して、この配置にインデックスを作成するかどうかを決定するようにします。
+- `graph indexer rules maybe [options] ` - Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment.

- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status.

-- `graph indexer action queue allocate ` - キューの割り当てアクション。
+- `graph indexer action queue allocate ` - Queue an allocation action.

-- `graph indexer action queue reallocate ` - queue reallocate actionを実行します。
+- `graph indexer action queue reallocate ` - Queue a reallocate action.

-- `graph indexer action queue unallocate ` - queue unallocate actionを実行します。
+- `graph indexer action queue unallocate ` - Queue an unallocate action.

-- `graph indexer actions cancel [ ...]` - idが未指定の場合はキュー内の全てのアクションをキャンセル、それ以外はスペースをセパレータとしたidの配列をキャンセルします。
+- `graph indexer actions cancel [ ...]` - Cancel all actions in the queue if no id is specified; otherwise cancel the space-separated list of action ids.

-- `graph indexer actions approve [ ...]` - 複数のアクションの実行を承認します。
+- `graph indexer actions approve [ ...]` - Approve multiple actions for execution.

-- `graph indexer actions execute approve` - 承認されたアクションを直ちに実行するようにワーカーを強制します。
+- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately.

-出力にルールを表示するすべてのコマンドは、`-output`引数を使用して、サポートされている出力形式(`table`, `yaml`, and `json`) の中から選択できます。
+All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument.

-#### インデキシングルール
+#### Indexing rules

-インデキシングルールは、グローバルなデフォルトとして、または ID を使用して特定のサブグラフデプロイメントに適用できます。 `deployment`と`decisionBasis`フィールドは必須で、その他のフィールドはすべてオプションです。 インデキシングルールが`decisionBasis`として`rules` を持つ場合、インデクサー・エージェントは、そのルール上の非 NULL の閾値と、対応する配置のためにネットワークから取得した値を比較します。 サブグラフデプロイメントがいずれかのしきい値以上(または以下)の値を持つ場合、それはインデキシングのために選択されます。
+Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment.
If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -例えば、グローバル ルールの`minStake`が**5**(GRT) の場合、5(GRT) 以上のステークが割り当てられているサブグラフデプロイメントは、インデックスが作成されます。 しきい値ルールには、 `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`があります。 +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. -データモデル +Data model: ```graphql type IndexingRule { @@ -598,7 +599,7 @@ IndexingDecisionBasis { } ``` -インデックスルールの使用例: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -610,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### アクションキュー CLI +#### Actions queue CLI -indexer-cli は、アクションキューを手動で操作するための `actions` モジュールを提供します。これは、アクションキューと対話するために、インデクサ管理サーバによってホストされる **Graphql API** を使用します。 +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -アクション実行ワーカーは、`ActionStatus = approved` である場合にのみ、実行するためにキューからアイテムを取得します。推奨されるパスでは、アクションは ActionStatus = queued でキューに追加されるので、オンチェインで実行するためには承認されなければなりません。一般的なフローは次のようになります。 +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- サードパーティのオプティマイザツールやindexer-cliのユーザーによってキューに追加されたアクション -- インデクサーは `indexer-cli` を使って、キューに入れられたすべてのアクションを見ることができます。 -- インデクサー(または他のソフトウェア) は、キュー内のアクションを `indexer-cli` を使って承認または取り消すことができます。approve と cancel コマンドは、入力としてアクション ID の配列を取ります。 -- 実行ワーカーは定期的に承認されたアクションのためにキューをポーリングします。キューから `approved` アクションを取得し、実行を試み、実行状況に応じて db の値を `success` または `failed` に更新します。 -- アクションが成功した場合、ワーカーは、エージェントが `auto` または `oversight` モードの間に手動アクションを取るときに便利な、今後どのように割り当てを管理するかを示すインデックス付けルールが存在することを確認します。 -- インデクサーはアクションキューを監視してアクションの実行履歴を確認し、必要であれば、実行に失敗したアクションアイテムを再承認して更新することができます。アクションキューは、キューに入れられ、実行されたすべてのアクションの履歴を提供します。 +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
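
That approval loop can be exercised end to end from the CLI. A brief sketch using the commands described above; the deployment ID is reused from the rule examples on this page, and the GRT amount and action id are illustrative:

```sh
# Queue an allocation action; it enters the queue with ActionStatus = queued
graph indexer action queue allocate QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK 5000

# Inspect the queue, then approve the new action (ActionStatus = approved)
graph indexer actions get all
graph indexer actions approve 1

# Optionally force the execution worker to run approved actions right away;
# each action is then marked success or failed depending on the outcome.
graph indexer actions execute approve
```
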
-データモデル: +Data model: ```graphql Type ActionInput { @@ -656,7 +657,7 @@ ActionType { } ``` -ソースからの使用例: +Example usage from source: ```bash graph indexer actions get all @@ -676,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -割り当て管理でサポートされるアクションタイプは、入力要件が異なることに注意してください。 +Note that supported action types for allocation management have different input requirements: -- `Allocate` - 特定のサブグラフの配置にステークを割り当てます。 +- `Allocate` - allocate stake to a specific subgraph deployment - - 必要なアクションパラメータを指定します: - - デプロイメントID - - 量 + - required action params: + - deploymentID + - amount -- `Unallocate` - 割り当てを終了し、他の場所に再割り当てするためにステークを解放します。 +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - 必要なアクションパラメータを指定します: - - アロケーションID - - デプロイメントID - - 任意のアクションパラメータ: + - required action params: + - allocationID + - deploymentID + - optional action params: - poi - - force (グラフノードが提供するものと一致しなくても、提供されたPOIを使用することを強制する) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - アロケーションをアトミックにクローズし、同じサブグラフのために新しいアロケーションをオープンします。 +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - 必要なアクションパラメータを指定します: - - アロケーションID - - デプロイメントID - - 量 - - 任意のアクションパラメータ: + - required action params: + - allocationID + - deploymentID + - amount + - optional action params: - poi - - force (グラフノードが提供するものと一致しなくても、提供されたPOIを使用することを強制する) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### コストモデル +#### Cost models -コストモデルは、マーケットやクエリ属性に基づいて、クエリの動的な価格設定を行います。 インデクサーサービスは、クエリに応答する予定の各サブグラフのコストモデルをゲートウェイと共有します。 一方、ゲートウェイはコストモデルを使用して、クエリごとにインデクサーの選択を決定し、選択されたインデクサーと支払いの交渉を行います。 +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Agora 言語は、クエリのコストモデルを宣言するための柔軟なフォーマットを提供します。 Agora のコストモデルは、GraphQL クエリのトップレベルのクエリごとに順番に実行される一連のステートメントです。 各トップレベルのクエリに対して、それにマッチする最初のステートメントがそのクエリの価格を決定します。 +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -ステートメントは、GraphQL クエリのマッチングに使用される述語と、評価されると decimal GRT でコストを出力するコスト式で構成されます。 クエリの名前付き引数の位置にある値は、述語の中に取り込まれ、式の中で使用されます。 また、グローバルを設定し、式のプレースホルダーとして代用することもできます。 +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. 
-コストモデルの例: +Example cost model: ``` # This statement captures the skip value, @@ -726,64 +727,64 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -上記のモデルを使用したクエリのコスト計算の例: +Example query costing using the above model: -| クエリ | 価格 | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | -| { トークン { シンボル } } | 0.1 GRT | +| { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### コストモデルの適用 +#### Applying the cost model -コストモデルは Indexer CLI を通じて適用され、それをインデクサー・エージェントの Indexer Management API に渡してデータベースに格納します。 その後、インデクサーサービスがこれを受け取り、ゲートウェイから要求があるたびにコスト・モデルを提供します。 +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## ネットワークとのインタラクション +## Interacting with the network -### プロトコルへのステーク +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -インデクサーがプロトコルにGRTをステークすると、[Indexerコンポーネント](/indexing/overview/#indexer-components)を立ち上げ、ネットワークとのやりとりを開始することができます。 +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### トークンの承認 +#### Approve tokens -1. ブラウザで[Remix app](https://remix.ethereum.org/)を開きます。 +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. `File Explorer`で**GraphToken.abi**というファイルを作成し、 [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json)を指定します。 +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. 環境から[`Injected Web3`] を選択し、`Account`でインデクサーアドレスを選択します。 +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. GraphToken のコントラクトアドレスの設定 - `At Address`の横に GraphToken のコントラクトアドレス(`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) を貼り付け、`At Address`ボタンをクリックして適用します。 +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. `approve(spender, amount)`関数を呼び出し、ステーキング契約を承認します。 `spender`にはステーキングコントラクトアドレス(`0xF55041E37E12cD407ad00CE2910B8269B01263b9`)を、`amount`にはステークするトークン(単位:wei)を記入します。 +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). 
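
Step 6 above expects the `amount` in wei. A quick sanity check for the conversion, assuming GRT's 18 decimal places (the figure shown is the 100K GRT minimum Indexer stake):

```sh
# 100,000 GRT expressed in wei (GRT uses 18 decimals):
python3 -c 'print(100_000 * 10**18)'
# -> 100000000000000000000000
```
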
-#### トークンをステークする
+#### Stake tokens

-1. ブラウザで[Remix app](https://remix.ethereum.org/)を開きます。
+1. Open the [Remix app](https://remix.ethereum.org/) in a browser.

-2. `File Explorer`で**Staking.abi**という名前のファイルを作成し、Staking ABI を指定します。
+2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI.

3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface.

-4. 環境から[`Injected Web3`] を選択し、`Account`でインデクサーアドレスを選択します。
+4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

-5. Staking contract address の設定 - `At Address`の横に Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) を貼り付け、 `At Address`ボタンをクリックして適用します。
+5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply.

-6. `stake()`を呼び出して、GRT をプロトコルにステークします。
+6. Call `stake()` to stake GRT in the protocol.

-7. (オプション)インデクサーは、資金を管理する鍵と、サブグラフへの割り当てや(有料の)クエリの提供などの日常的な動作を行う鍵とを分離するために、別のアドレスをインデクサインフラストラクチャのオペレーターとして承認することができます。 オペレーターを設定するには、オペレーターのアドレスを指定して`setOperator()`をコールします。
+7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day-to-day actions such as allocating on subgraphs and serving (paid) queries. To set the operator, call `setOperator()` with the operator address.

-8. (オプション) 報酬の分配を制御し、デリゲータを戦略的に引き付けるために、 インデクサーは indexingRewardCut (parts per million)、 queryFeeCut (parts per million)、 cooldownBlocks (number of blocks) を更新することで、 デリゲーションパラメータを更新することができます。 これを行うには`setDelegationParameters()`をコールします。 次の例では、クエリフィーカットをクエリリベートの 95%をインデクサーに、5%をデリゲーターに分配するように設定し、インデクサーリワードカットをインデキシング報酬の 60%をインデクサーに、40%をデリゲーターに分配するよう設定し、`thecooldownBlocks` 期間を 500 ブロックに設定しています。
+8. (Optional) To control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

### Setting delegation parameters

To set the delegation parameters using Graph Explorer interface, follow these steps:

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### アロケーションの寿命
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -インデクサーは、チェーン上に配置を作成する前に、チェーンヘッドにサブグラフの配置を同期させるために、オフチェーン同期機能を利用することを推奨します。この機能は、同期に28エポック以上かかる可能性があるサブグラフや、不定期に失敗する可能性があるサブグラフに特に有効です。 +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From eda5bc520fe8aaa0e626138b08bf0c95bf19fb06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:41 -0500 Subject: [PATCH 0077/1534] New translations overview.mdx (Korean) --- website/src/pages/ko/indexing/overview.mdx | 60 +++++++++++----------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/website/src/pages/ko/indexing/overview.mdx b/website/src/pages/ko/indexing/overview.mdx index bf51dec0b32b..3f9b35378f86 100644 --- a/website/src/pages/ko/indexing/overview.mdx +++ b/website/src/pages/ko/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexing +title: Indexing Overview +sidebarTitle: Overview --- Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. @@ -59,8 +60,7 @@ query indexerAllocations { Use Etherscan to call `getRewards()`: - Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* To call `getRewards()`: +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - Enter the **allocationID** in the input. - Click the **Query** button. @@ -110,12 +110,12 @@ Indexers may differentiate themselves by applying advanced techniques for making - **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. - **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -135,7 +135,7 @@ At the center of an Indexer's infrastructure is the Graph Node which monitors th - **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,26 +147,26 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | ### Setup server infrastructure using Terraform on Google Cloud @@ -256,7 +256,7 @@ indexer= cat > terraform.tfvars < Date: Fri, 14 Feb 2025 12:44:42 -0500 Subject: [PATCH 0078/1534] New translations overview.mdx (Dutch) --- website/src/pages/nl/indexing/overview.mdx | 296 ++++++++++----------- 1 file changed, 148 insertions(+), 148 deletions(-) diff --git a/website/src/pages/nl/indexing/overview.mdx b/website/src/pages/nl/indexing/overview.mdx index 106f466d4aff..cabbccc1346f 100644 --- a/website/src/pages/nl/indexing/overview.mdx +++ b/website/src/pages/nl/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexeren +title: Indexing Overview +sidebarTitle: Overview --- Indexeers zijn node-operators in The Graph Netwerk die Graph Tokens (GRT) inzetten om indexing- en queryverwerkingsdiensten te leveren. Indexeerders verdienen querykosten en indexingsbeloningen voor hun diensten. Ze verdienen ook querykosten die worden terugbetaald volgens een exponentiële functie. @@ -10,35 +11,35 @@ Indexeerders selecteren subgraphs om te indexeren op basis van het curatiesignaa ## FAQ -### Wat is de minimale inzet vereist om een Indexeerder op het netwerk te zijn? +### What is the minimum stake required to be an Indexer on the network? -De minimale inzet voor een Indexeerder is momenteel vastgesteld op 100K GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### Wat zijn de inkomstenstromen voor een Indexeerder? +### What are the revenue streams for an Indexer? -**Querykostenrebates** - Betalingen voor het verwerken van queries op het netwerk. Deze betalingen worden bemiddeld via state channels tussen een Indexeerder en een gateway. Elke query-aanvraag van een gateway bevat een betaling en de bijbehorende reactie een bewijs van geldigheid van het queryresultaat. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexeringsbeloningen** - De indexeringsbeloningen worden gegenereerd via de 3% jaarlijkse protocolbrede inflatie en verdeeld onder de Indexeerders die subgraphs voor het netwerk indexeren. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### Hoe worden indexeringsbeloningen verdeeld? +### How are indexing rewards distributed? -Indexeringsbeloningen komen van de protocolinflatie die is vastgesteld op 3% jaarlijkse uitgifte. Ze worden verdeeld over de subgraphs op basis van de verhouding van al het curatiesignaal, en vervolgens proportioneel verdeeld onder de Indexers op basis van hun toegewezen inzet op die subgraph. 
**Een allocatie moet worden gesloten met een geldig bewijs van indexering (Proof of Indexing) dat voldoet aan de normen die door het arbitrage charter zijn vastgesteld om in aanmerking te komen voor beloningen. ** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** -Vele hulpmiddelen zijn gecreëerd door de community voor het berekenen van beloningen; je vindt een verzameling ervan georganiseerd in de [Community Guides collectie](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). Je kunt ook een actuele lijst van hulpmiddelen vinden in de #Delegators en #indexers kanalen op de [Discord server](https://discord.gg/graphprotocol). Hier is een link naar de [aanbevolen allocatie optimalisator](https://github.com/graphprotocol/allocation-optimizer) die is geïntegreerd met de Indexeerder-softwarestack. +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### Wat is een bewijs van indexering (Proof of Indexing)? +### What is a proof of indexing (POI)? -POI's worden in het netwerk gebruikt om te verifiëren dat een indexer aan het indexeren is op de subgraph waaraan zij zijn toegewezen. Een POI voor het eerste blok van de huidige epoch moet opgegeven worden tijdens het sluiten van een allocatie, zodat die allocatie in aanmerking komt voor indexeringsbeloningen. Een POI voor een blok is een verwerking van alle entiteitwinkeltransacties voor een specifieke subgraph-implementatie tot aan en inclusief dat blok. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### Wanneer worden indexeringsbeloningen verdeeld? +### When are indexing rewards distributed? -Allocaties verzamelen voortdurend beloningen terwijl ze actief zijn tot een maximum duur van 28 epochs. Beloningen worden verzameld door de Indexers en verdeeld wanneer ze hun allocaties sluiten. Dat gebeurt handmatig wanneer de Indexeerder ze wil sluiten of, na 28 epochs kan een Delagator de allocatie voor de Indexeerder sluiten, maar dit resulteerd in geen beloningen. 28 epochs is de maximale duur van een allocatie (momenteel duurt één epoch ongeveer 24 uur). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. 
That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### Kunnen ongerealiseerde indexeringsbeloningen worden bekeken? +### Can pending indexing rewards be monitored? -Het RewardsManager-contract heeft een read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) functie die kan worden gebruikt om te controleren hoeveel ongerealiseerde beloningen er zijn voor een specifieke allocatie. +The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -Veel van de door de community gemaakte dashboards bevatten waarden van ongerealiseerde beloningen en ze kunnen gemakkelijk handmatig worden gecontroleerd door deze stappen te volgen: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: @@ -56,139 +57,138 @@ query indexerAllocations { } ``` -Gebruik Etherscan om `getRewards()` aan te roepen: +Use Etherscan to call `getRewards()`: -- Navigeer naar de [Etherscan-interface naar het Rewards-contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: + - Expand the **9. getRewards** dropdown. + - Enter the **allocationID** in the input. + - Click the **Query** button. -* Om `getRewards()` aan te roepen: - - Klap het dropdown-menu van **9. getRewards** uit. - - Voer het **allocationID** in het invoerveld in. - - Klik op de **Query** knop. +### What are disputes and where can I view them? -### Wat zijn geschillen en waar kan ik ze bekijken? +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. -Een indexers query's en allocaties kunnen beide betwist worden op The Graph tijdens de dispuutperiode. De dispuutperiode varieert afhankelijk van het type dispuut. Query's/attestaties hebben een 7 epochs dispuutperiode waarbij allocaties 56 epochs hebben. Nadat deze periodes aflopen kunnen disputen niet meer geopnd worde thegen allocaties of query's. Wanneer een dispuut geopend is, is een storting van minimaal 10,000 GRT vereist door de Fisherman, welke vergrendeld zal worden to het dipuut gefinalizeerd is en een resolutie gegeven is. Fisherman zijn alle netwerkdeelnemers die disputen openen. 
+Disputes have **three** possible outcomes, so does the deposit of the Fishermen. -Disputen hebben **drie** mogelijke uitkomsten, net als de storting van de Fishermen. +- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -- Als het dispuut wordt verworpen, wordt de door de Fishermen gestorte GRT verbrand, en de betwiste Indexeerder zal niet worden geslashed. -- Als het dispuut wordt beoordeeld als een gelijkspel, wordt de storting van de Fishermen teruggestort, en de betwiste Indexeerder zal niet worden geslashed. -- Als het dispuut wordt geaccepteerd, wordt de door de Fishermen gestorte GRT teruggestort, de betwiste Indexeerder zal worden geslashed. De Fishermen ontvangen 50% van de GRT die geslashed is. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -Disputen kunnen in de UI op de profielpagina van de Indexeerder onder het tabblad `Disputen` worden bekeken. +### What are query fee rebates and when are they distributed? -### Wat zijn query kosten rebates en wanneer worden ze uitgedeeld? +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Querykosten worden verzameld door de gateway en verdeeld aan Indexeerders volgens de exponentiële rebate-functie (zie de GIP [hier](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). De exponentiële rebate-functie is voorgesteld als een manier om ervoor te zorgen dat Indexeerders het beste resultaat behalen door trouw query's te verwerken. Het werkt door het voor Indexeerders aantrekkelijk the maken om een grote hoeveelheid in te zetten (die kan worden geslashed voor fouten bij het verwerken van een query) in verhouding tot de hoeveelheid querykosten die ze kunnen innen. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -Zodra een allocatie is gesloten, zijn de rebates beschikbaar om door de Indexeerder te worden geclaimd. Bij het claimen worden de query rebates verdeeld aan de Indexeerder en hun Delegators op basis van de query fee cut en de exponentiële rebate-functie. +### What is query fee cut and indexing reward cut? -### Wat is de Query Fee Cut en de Indexing Reward Cut? +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. 
-De `queryFeeCut` en `indexingRewardCut` waarden zijn delegatie parameters die de Indexer kan instellen samen met de cooldownBlocks om de verdeling GRT te controleren tussen de Indexer en hun Delegators. Zie de laatste stappen in [Inzetten in het Protocol](/indexing/overview/#stake-in-the-protocol) voor instructies op delegatie parameters in the stellen. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **queryFeeCut** - het % van de query kosten rebates die worden verdeeld aan de Indexeerder. Als dit is ingesteld op 95%, ontvangt de Indexeerder 95% van de query kosten die zijn verdiend wanneer een allocatie wordt gesloten, met de overige 5% gaande naar Delegators. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -- **indexingRewardCut** - het % van de indexeringsbeloningen die worden verdeeld aan de Indexeerder. Als dit is ingesteld op 95%, ontvangt de Indexeerder 95% van de indexeringsbeloningen wanneer een allocatie wordt gesloten, met de overige 5% verdeeld onder de Delegators. +### How do Indexers know which subgraphs to index? -### Hoe weten Indexeerders welke subgraphs te indexeren? +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -Indexeerders kunnen zich onderscheiden door geavanceerde technieken toe te passen bij het maken van beslissingen over subgraph indexering maar om een algemeen idee te geven, bespreken we enkele belangrijke meetpunten die worden gebruikt om subgraphs te evalueren in het netwerk: +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Curatie Signaal** - De proportie van het netwerk curatie signaal dat is gesignaleerd op een bepaalde subgraph is een goede indicator van de interesse in die subgraph, vooral tijdens de bootstrap fase wanneer het query volume toeneemt. +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **Verzamelde query kosten** - De historische data voor het volume van verzamelde query kosten voor een specifieke subgraph is een goede indicator voor toekomstige vraag. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **Hoeveelheid inzet** - Het monitoren van het gedrag van andere Indexeerders of het bekijken van verhoudingen van het totaal aan inzet naar specifieke subgraphs kan een Indexeerder helpen de aanbodzijde voor subgraph query's te monitoren om subgraphs te identificeren waarin het netwerk vertrouwen toont of subgraphs die mogelijk meer aanbod nodig hebben. 
+- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -- **Subgraphs zonder indexeringsbeloningen** - Sommige subgraphs genereren geen indexeringsbeloningen, meestal omdat ze niet-ondersteunde functies zoals IPFS gebruiken of query's versturen naar een netwerk buiten het mainnet. Je zult een bericht zien op een subgraph als deze geen indexeringsbeloningen genereert. +### What are the hardware requirements? -### Wat zijn de hardware vereisten? +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -- **Klein** - Genoeg om te beginnen met het indexeren van enkele subgraphs, zal waarschijnlijk uitgebreid moeten worden. -- **Standaard** - Standaard setup, dit wordt gebruikt in het voorbeeld k8s/terraform uitvoeringsmanifest. -- **Middel** - Indexer die 100 subgraphs ondersteund en 200-500 query's per seconde verwerkt. -- **Groot** - Voorbereid om alle momenteel gebruikte subgraphs te indexeren en de bijbehorende query's te verwerken. +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -| Setup | Postgres
(CPUs) | Postgres
(Geheugen in GBs) | Postgres
(schijf in TBs) | VMs
(CPUs) | VMs
(geheugen in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Klein | 4 | 8 | 1 | 4 | 16 | -| Standaard | 8 | 30 | 1 | 12 | 48 | -| Middel | 16 | 64 | 2 | 32 | 64 | -| Groot | 72 | 468 | 3.5 | 48 | 184 | +### What are some basic security precautions an Indexer should take? -### Wat zijn enkele basisveiligheidsmaatregelen die een Indexeerder moet nemen? +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **Operator wallet** - Het opzetten van een operator wallet is een belangrijke voorzorgsmaatregel omdat het een Indexeerder in staat stelt om scheiding te behouden tussen de sleutels die de GRT beheren en degenen die de dagelijkse operaties beheren. Zie [Inzet in het Protocol](/indexing/overview/#stake-in-the-protocol) voor instructies. +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -- **Firewall** - Alleen de Indexer service hoeft openbaar te worden blootgesteld en er moet extra aandacht worden besteed aan het vergrendelen van de admin poorten en database toegang: de Graph Node Jason-RPC endpoint (standaardpoort: 8030), de Indexer management API endpoint (standaardpoort: 18000), en de Postgres database endpoint (standaardpoort: 5432) mogen niet worden blootgesteld. +## Infrastructure -## Infrastructuur +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -Het hart van de infrastructuur van een Indexeerder is de Graph Node, die de geïndexeerde netwerken monitort, data extract en laadt volgens een subgraphdefinitie en deze dient als een [GraphQL API](/about/#how-the-graph-works). De Graph Node moet verbonden zijn met een eindpunt dat gegevens van elke geïndexeerd netwerk blootstelt; een IPFS-node voor het verkrijgen van data, een PostgreSQL database voor de opslag; en de Indexer-componenten die de interacties met het netwerk faciliteren. +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **PostgreSQL database** - De hoofdopslag voor de Graph Node, hier wordt subgraph data opgeslagen. De Indexer-service en -agent gebruiken de database ook om gegevens van state channels, kostenmodellen, indexatieregels en allocatieacties op te slaan. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. 
It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **Data eindpunt** Voor EVM-compatibele netwerken moet de Graph Node verbonden zijn met een eindpunt dat een EVM-compatibele JSON-RPC API blootstelt. Dit kan de vorm aannemen van een enkele client of een complexere setup die de belasting verdeeld over meerdere clients. Het is belangrijk om te weten dat sommige subgraphs specifieke eisen hebben voor de client, zoals archiefmodus en/of de parity tracing API. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. -- **IPFS node (versie lager dan 5)** - Subgraph implementatie metadata is opgeslagen op het IPFS netwerk. De Graph Node communiceert voornamelijk met de IPFS-node tijdens de implementatie van subgraphs om het subgraph manifest en alle gelinkte bestanden op te vragen. Netwerk Indexers hoeven geen eigen IPFS-node te hosten, een IPFS-node voor het netwerk wordt gehost op https://ipfs.network.thegraph.com. +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer service** - Beheert alle vereiste externe communicatie met het netwerk. Deelt kostenmodellen en indexeerstatussen, stuurt query's van gateways door naar een Graph Node en beheert de query betalingen via state channels met de gateway. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **Indexer agent** - Faciliteert de interacties van de Indexeerders op de chain, inclusief registratie op het netwerk, beheer van subgraph implementaties op de Graph Node(s) en beheer van allocaties. +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -- **Prometheus metrics-server** - De Graph Node en Indexer-componenten versturen hun metrics naar de metrics-server. +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -Tip: Om wendbare schaalvergroting te ondersteunen, wordt aanbevolen om Query- en Indexeringszaken te scheiden tussen verschillende sets nodes: Query Nodes en Index Nodes. +### Ports overview -### Poorten overzicht - -> **Belangrijk**: Wees voorzichtig met het openbaar blootstellen van poorten - **admin poorten** moeten vergrendeld blijven. Dit is inclusief de Graph Node JSON-RPC en de Indexer management endpoints, zoals hieronder beschreven. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Graph Node -| Poort | Doel | Routes | CLI-Argument | Omgevingsvariabele | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(voor subgraph query's) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(voor subgraph abonnementen) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(voor het beheren van implementaties) | / | --admin-port | - | -| 8030 | Subgraph indexeerstatus API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Poort | Doel | Routes | CLI-Argument | Omgevingsvariabele | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(voor betaalde subgraph query's) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| Poort | Doel | Routes | CLI-Argument | Omgevingsvariabele | -| ----- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Het opzetten van server infrastructuur met Terraform op Google Cloud +### Setup server infrastructure using Terraform on Google Cloud -> Let op: Indexeerders kunnen als alternatief AWS, Microsoft Azure or Alibaba gebruiken. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Installeer vereisten +#### Install prerequisites - Google Cloud SDK - Kubectl command line tool - Terraform -#### Maak een Google Cloud project aan +#### Create a Google Cloud Project -- Kloon of navigeer naar de [ Indexer repository](https://github.com/graphprotocol/indexer). +- Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). -- Navigeer naar de `./terraform` map, dit is waar alle commando's moeten worden uitgevoerd. +- Navigate to the `./terraform` directory, this is where all commands should be executed. ```sh cd terraform ``` -- Authenticeer met Google Cloud en maak een nieuw project. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Gebruik de factureringspagina van Google Cloud Console om facturering voor het nieuwe project in te schakelen. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Maak een Google Cloud configuratie. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Schakel de vereiste Google Cloud API's in. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Maak een serviceaccount. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Schakel peering in tussen de database en de Kubernetes cluster die in de volgende stap wordt gemaakt. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,35 +249,35 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Maak het minimaal terraform-configuratiebestand (update indien nodig). +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < **OPMERKING**: Alle configuratievariabelen tijdens runtime kunnen worden toegepast als parameters bij het opstartcommando of met behulp van omgevingsvariabelen in het format `COMPONENT_NAME_VARIABLE_NAME`(bijv. `INDEXER_AGENT_ETHEREUM`). 
+> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). -#### Indexer Agent +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Indexer Service +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -516,7 +516,7 @@ graph-indexer-service start \ #### Indexer CLI -De Indexer CLI is een plug-in voor [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) toegankelijk in de terminal op `graph indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 @@ -615,7 +615,7 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: - Action added to the queue by the 3rd party optimizer tool or indexer-cli user - Indexer can use the `indexer-cli` to view all queued actions @@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st After being created by an Indexer a healthy allocation goes through two states. -- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). 
When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 31d1252e6ff93a7a5f7247897d083d420a621ae3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:43 -0500 Subject: [PATCH 0079/1534] New translations overview.mdx (Polish) --- website/src/pages/pl/indexing/overview.mdx | 60 +++++++++++----------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/website/src/pages/pl/indexing/overview.mdx b/website/src/pages/pl/indexing/overview.mdx index bf51dec0b32b..3f9b35378f86 100644 --- a/website/src/pages/pl/indexing/overview.mdx +++ b/website/src/pages/pl/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexing +title: Indexing Overview +sidebarTitle: Overview --- Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. @@ -59,8 +60,7 @@ query indexerAllocations { Use Etherscan to call `getRewards()`: - Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* To call `getRewards()`: +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - Enter the **allocationID** in the input. - Click the **Query** button. @@ -110,12 +110,12 @@ Indexers may differentiate themselves by applying advanced techniques for making - **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. - **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Setup | Postgres
<br />(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs<br />
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
<br />(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs<br />
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -135,7 +135,7 @@ At the center of an Indexer's infrastructure is the Graph Node which monitors th - **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. @@ -147,26 +147,26 @@ Note: To support agile scaling, it is recommended that query and indexing concer #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
<br />(for subgraph queries) | /subgraphs/id/...<br />
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
<br />(for subgraph subscriptions) | /subgraphs/id/...<br />
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
<br />(for subgraph queries) | /subgraphs/id/...<br />
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
<br />(for subgraph subscriptions) | /subgraphs/id/...<br />
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
<br />(for paid subgraph queries) | /subgraphs/id/...<br />/status<br />
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
<br />(for paid subgraph queries) | /subgraphs/id/...<br />/status<br />
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | ### Setup server infrastructure using Terraform on Google Cloud @@ -256,7 +256,7 @@ indexer= cat > terraform.tfvars < Date: Fri, 14 Feb 2025 12:44:45 -0500 Subject: [PATCH 0080/1534] New translations overview.mdx (Portuguese) --- website/src/pages/pt/indexing/overview.mdx | 499 ++++++++++----------- 1 file changed, 249 insertions(+), 250 deletions(-) diff --git a/website/src/pages/pt/indexing/overview.mdx b/website/src/pages/pt/indexing/overview.mdx index f6df4faabcc2..56816067bc9a 100644 --- a/website/src/pages/pt/indexing/overview.mdx +++ b/website/src/pages/pt/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexação +title: Visão Geral sobre a Indexação +sidebarTitle: Visão geral --- Indexadores são operadores de nodes na Graph Network que fazem staking em Graph Tokens (GRT) para prover serviços de indexação e processamento de consultas em query (queries). Os indexadores ganham taxas de consulta e recompensas de indexação pelos seus serviços. Eles também ganham taxas de query que são rebatadas de acordo com uma função de rebate exponencial. @@ -8,43 +9,43 @@ O GRT em staking no protocolo é sujeito a um período de degelo, e pode passar Indexadores selecionam subgraphs para indexar com base no sinal de curadoria do subgraph, onde Curadores depositam GRT em staking para indicar quais subgraphs são de qualidade alta e devem ser priorizados. Consumidores (por ex., aplicativos) também podem configurar parâmetros para os quais Indexadores processam queries para seus subgraphs, além de configurar preferências para o preço das taxas de query. -## Perguntas Frequentes +## FAQ -### Qual o stake mínimo exigido para ser um Indexador na rede? +### What is the minimum stake required to be an Indexer on the network? -O stake mínimo atual para um Indexador é de 100 mil GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### Quais são as fontes de renda para um Indexador? +### What are the revenue streams for an Indexer? -**Rebates de taxas de consulta** - Pagamentos por serviço de consultas na rede. Estes pagamentos são mediados através de canais de estado entre um Indexador e um gateway. Cada pedido de query de um gateway contém um pagamento e a resposta correspondente: uma prova de validade de resultado de query. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Recompensas de indexação** — são distribuídas a Indexadores que indexam lançamentos de subgraph para a rede. São geradas através de uma inflação de 3% para todo o protocolo. 
+**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### Como são distribuídas as recompensas de indexação? +### How are indexing rewards distributed? -As recompensas de indexação vêm da inflação do protocolo, que é configurada em 3% da emissão anual. Elas são distribuídas em subgraphs, com base na proporção de todos os sinais de curadoria em cada um, e depois distribuídos proporcionalmente a Indexadores baseado no stake que alocaram naquele subgraph. ** Para ser elegível a recompensas, uma alocação deve ser fechada com uma prova de indexação válida (POI) que atende aos padrões determinados pela carta de arbitragem.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** -A comunidade criou várias ferramentas para calcular recompensas, organizadas na [coleção de Guias da Comunidade](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). Há também uma lista atualizada de ferramentas nos canais #Delegators e #Indexers no [servidor do Discord](https://discord.gg/graphprotocol). No próximo link, temos um [otimizador de alocações recomendadas](https://github.com/graphprotocol/allocation-optimizer) integrado com o stack de software de indexador. +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### O que é uma prova de indexação (POI)? +### What is a proof of indexing (POI)? -POIs são usadas na rede, para verificar que um Indexador está indexando os subgraphs nos quais eles alocaram. Uma POI para o primeiro bloco da epoch atual deve ser enviada ao fechar uma alocação, para que aquela alocação seja elegível a recompensas de indexação. Uma POI para um bloco serve como resumo para todas as transações de armazenamento de entidade para um lançamento específico de subgraph, até, e incluindo, aquele bloco. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### Quando são distribuídas as recompensas de indexação? +### When are indexing rewards distributed? -As alocações continuamente acumulam recompensas enquanto permanecerem ativas e alocadas dentro de 28 epochs. As recompensas são coletadas pelos Indexadores, e distribuídas sempre que suas alocações são fechadas. 
Isto acontece ou manualmente, quando o Indexer quer fechá-las à força; ou após 28 epochs, quando um Delegante pode fechar a alocação para o Indexador, mas isto não rende recompensas. A vida máxima de uma alocação é de 28 epochs (no momento, um epoch dura cerca de 24 horas). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### É possível monitorar recompensas de indexação pendentes? +### Can pending indexing rewards be monitored? -O contrato RewardsManager tem uma função [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) de apenas-leitura que pode ser usada para conferir as recompensas pendentes para uma alocação específica. +The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -Muitos dos painéis feitos pela comunidade incluem valores pendentes de recompensas, que podem facilmente ser conferidos de forma manual ao seguir os seguintes passos: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. Consulte o [subgraph da mainnet](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) para conseguir as IDs para todas as alocações ativas: +1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { - indexer(id: "") { + indexer(id: "") { allocations { activeForIndexer { allocations { @@ -56,139 +57,138 @@ query indexerAllocations { } ``` -Use o Etherscan para chamar o `getRewards()`: +Use Etherscan to call `getRewards()`: -- Navegue à [interface do Etherscan para o contrato de Recompensas](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: + - Expand the **9. getRewards** dropdown. + - Enter the **allocationID** in the input. + - Click the **Query** button. -* Para chamar o `getRewards()`: - - Abra o dropdown **9. getRewards**. - - Insira a **allocationID**. - - Clique no botão **Query**. +### What are disputes and where can I view them? -### O que são disputas e onde posso vê-las? +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. 
Fisherman are any network participants that open disputes. -As consultas em query e alocações de Indexadores podem ser disputadas no The Graph durante o período de disputa. O período de disputa varia dependendo do tipo de disputa. Consultas/atestações têm uma janela de disputa de 7 epochs, enquanto alocações duram até 56 epochs. Após o vencimento destes períodos, não se pode abrir disputas contra alocações ou consultas. Quando uma disputa é aberta, um depósito mínimo de 10.000 GRT é exigido pelos Pescadores, que será trancado até ser finalizada a disputa e servida uma resolução. Pescadores são quaisquer participantes de rede que abrem disputas. +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. -Há **três** possíveis resultados para disputas, assim como o depósito dos Pescadores. +- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -- Se a disputa for rejeitada, o GRT depositado pelo Pescador será queimado, e o Indexador disputado não será penalizado. -- Se a disputa terminar em empate, o depósito do Pescador será retornado, e o Indexador disputado não será penalizado. -- Caso aceita a disputa, o GRT depositado pelo Pescador será retornado, o Indexador disputado será penalizado, e o(s) Pescador(es) ganhará(ão) 50% do GRT cortado. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -As disputas podem ser visualizadas na interface na página de perfil de um Indexador, sob a aba `Disputes` (Disputas). +### What are query fee rebates and when are they distributed? -### O que são rebates de taxas de consulta e quando eles são distribuídos? +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -As taxas de query são coletadas pelo gateway e distribuídas aos Indexadores de acordo com a função de rebate exponencial (veja o GIP [aqui](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). A tal função é proposta como uma maneira de garantir que indexadores alcancem o melhor resultado ao servir queries fieis. Ela funciona com o incentivo de Indexadores para alocarem uma grande quantia de stake (que pode ser cortada por errar ao servir um query) relativa à quantidade de taxas de query que possam colecionar. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -Quando uma alocação é fechada, os rebates podem ser reivindicados pelo Indexador. 
Ao reivindicar, os rebates de taxa de consulta são distribuídos ao Indexador e os seus Delegantes com base na porção de taxas de consulta e na função de rebate exponencial. +### What is query fee cut and indexing reward cut? -### O que são porção de taxa de consulta e porção de recompensa de indexação? +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -Os valores `queryFeeCut` e `indexingRewardCut` são parâmetros de delegação que o Indexador pode configurar junto com o `cooldownBlocks` para controlar a distribuição de GRT entre o Indexador e os seus Delegantes. Veja os últimos passos no [Staking no Protocolo](/indexing/overview/#stake-in-the-protocol) para instruções sobre como configurar os parâmetros de delegação. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **queryFeeCut** - o % de rebates de taxas de query a ser distribuído ao Indexador. Se isto for configurado em 95%, o Indexador receberá 95% das taxas de query ganhas quando uma alocação for fechada, com os outros 5% destinados aos Delegantes. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -- **indexingRewardCut** - o % de recompensas de indexação a ser distribuído ao Indexador. Se isto for configurado em 95%, o Indexador receberá 95% do pool de recompensas de indexação ao fechamento de uma alocação e os Delegantes dividirão os outros 5%. +### How do Indexers know which subgraphs to index? -### Como os Indexadores podem saber quais subgraphs indexar? +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -Os Indexadores podem se diferenciar ao aplicar técnicas avançadas para decidir indexações de subgraph, mas para dar uma ideia geral, vamos discutir várias métricas importantes usadas para avaliar subgraphs na rede: +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Sinal de curadoria** — A proporção do sinal de curadoria na rede aplicado a um subgraph particular mede bem o interesse naquele subgraph; especialmente durante a fase de inicialização, quando o volume de consultas começa a subir. +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **Taxas de query coletadas** — Os dados históricos para o volume de taxas de query coletadas para um subgraph específico indicam bem a demanda futura. 
+- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **Quantidade em staking** - Ao monitorar o comportamento de outros Indexadores ou inspecionar proporções de stake total alocados a subgraphs específicos, um Indexador pode monitorar a reserva para queries nos subgraphs, para identificar subgraphs nos quais a rede mostra confiança ou subgraphs que podem necessitar de mais reservas. +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -- **Subgraphs sem recompensas de indexação** - Alguns subgraphs não geram recompensas de indexação, principalmente porque eles usam recursos não apoiados, como o IPFS, ou porque consultam outra rede fora da mainnet. Se um subgraph não estiver a gerar recompensas de indexação, o Indexador será notificado a respeito. +### What are the hardware requirements? -### Quais são os requisitos de hardware? +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -- **Pequeno** — O suficiente para começar a indexar vários subgraphs. Provavelmente precisará de expansões. -- **Normal** — Setup normal. Este é o usado nos exemplos de manifests de lançamento k8s/terraform. -- **Médio** — Indexador de Produção. Apoia 100 subgraphs e 200 – 500 pedidos por segundo. -- **Grande** — Preparado para indexar todos os subgraphs usados atualmente e servir pedidos para o tráfego relacionado. +| Setup | Postgres
<br />(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs<br />
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -| Setup | Postgres
<br />(CPUs) | Postgres<br />(memória em GBs) | Postgres<br />(disco em TBs) | VMs<br />(CPUs) | VMs<br />
(memória em GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Pequeno | 4 | 8 | 1 | 4 | 16 | -| Normal | 8 | 30 | 1 | 12 | 48 | -| Médio | 16 | 64 | 2 | 32 | 64 | -| Grande | 72 | 468 | 3.5 | 48 | 184 | +### What are some basic security precautions an Indexer should take? -### Há alguma precaução básica de segurança que um Indexador deve tomar? +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **Carteira de operador** — Configurar uma carteira de operador é importante, pois permite a um Indexador manter a separação entre as suas chaves que controlam o stake e aquelas no controlo das operações diárias. Mais informações em [Staking no Protocolo](/indexing/overview/#stake-in-the-protocol). +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -- **Firewall** - O serviço de Indexadores é o único que precisa ser exposto publicamente, e o trancamento de portas de admin e acesso ao banco de dados exigem muito mais atenção: o endpoint JSON-RPC do Graph Node (porta padrão: 8030), o endpoint da API de gerenciamento do Indexador (porta padrão: 18000), e o endpoint do banco de dados Postgres (porta padrão: 5432) não devem ser expostos. +## Infrastructure -## Infraestutura +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -O núcleo da infraestrutura de um Indexador é o Graph Node, que monitora as redes indexadas, extrai e carrega dados por uma definição de um subgraph, e o serve como uma [API GraphQL](/about/#how-the-graph-works). O Graph Node deve estar conectado a endpoints que expõem dados de cada rede indexada; um node IPFS para abastecer os dados; um banco de dados PostgreSQL para o seu armazenamento; e componentes de Indexador que facilitem as suas interações com a rede. +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Banco de dados PostgreSQL** — O armazenamento principal para o Graph Node, onde dados de subgraph são armazenados. O serviço e o agente indexador também usam o banco de dados para armazenar dados de canal de estado, modelos de custo, regras de indexação, e ações de alocação. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. 
It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **Endpoint de dados** — Para redes compatíveis com EVMs, o Graph Node deve estar conectado a um endpoint que expõe uma API JSON-RPC compatível com EVMs. Isto pode ser um único cliente, ou um setup mais complexo que carrega saldos em várias redes. É importante saber que certos subgraphs exigirão capabilidades particulares de clientes, como um modo de arquivo e uma API de rastreamento. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. -- **Node IPFS (versão menor que 5)** — Os metadados de lançamento de subgraph são armazenados na rede IPFS. O Graph Node acessa primariamente o node IPFS durante o lançamento do subgraph, para retirar o manifest e todos os arquivos ligados. Indexadores de rede não precisam hospedar seu próprio node IPFS, pois já há um hospedado para a rede em https://ipfs.network.thegraph.com. +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Serviço de Indexador** — Cuida de todas as comunicações externas com a rede requeridas. Divide modelos de custo e estados de indexação, passa pedidos de consulta de gateways para um Graph Node, e monitora os pagamentos de consulta através de canais de estado com o gateway. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **Agente Indexador** — Facilita as interações de Indexadores on-chain, incluindo cadastros na rede, gestão de lançamentos de Subgraph ao(s) seu(s) Graph Node(s), e gestão de alocações. +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -- **Servidor de métricas Prometheus** — O Graph Node e os componentes de Indexador logam suas métricas ao servidor de métricas. +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -Nota: Para apoiar o escalamento ágil, recomendamos que assuntos de consulta e indexação sejam separados entre conjuntos diferentes de nodes: nodes de consulta e nodes de indexação. +### Ports overview -### Resumo das portas - -> **Importante:** Cuidado ao expor portas publicamente — as **portas de administração** devem ser trancadas a sete chaves. Isto inclui o endpoint JSON-RPC do Graph Node e os pontos finais de gestão de Indexador detalhados abaixo. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Graph Node -| Porta | Propósito | Rotas | Argumento CLI | Variável de Ambiente | -| --- | --- | --- | --- | --- | -| 8000 | Servidor HTTP GraphQL
<br />(para consultas de subgraph) | /subgraphs/id/...<br />
/subgraphs/name/.../... | --http-port | - | -| 8001 | WS GraphQL
<br />(para inscrições a subgraphs) | /subgraphs/id/...<br />
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(para gerir lançamentos) | / | --admin-port | - | -| 8030 | API de status de indexamento do subgraph | /graphql | --index-node-port | - | -| 8040 | Métricas Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
<br />(for subgraph queries) | /subgraphs/id/...<br />
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
<br />(for subgraph subscriptions) | /subgraphs/id/...<br />
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Serviço Indexador +#### Indexer Service -| Porta | Propósito | Rotas | Argumento CLI | Variável de Ambiente | -| --- | --- | --- | --- | --- | -| 7600 | Servidor HTTP GraphQL
<br />(para consultas de subgraph pagas) | /subgraphs/id/...<br />/status<br />
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
<br />(for paid subgraph queries) | /subgraphs/id/...<br />/status<br />
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Agente Indexador +#### Indexer Agent -| Porta | Propósito | Rotas | Argumento CLI | Variável de Ambiente | -| ----- | -------------------------- | ----- | ------------------------- | --------------------------------------- | -| 8000 | API de gestão de Indexador | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Preparando uma infraestrutura de servidor usando o Terraform no Google Cloud +### Setup server infrastructure using Terraform on Google Cloud -> Nota: Como alternativa, os Indexadores podem usar o AWS, Microsoft Azure, ou Alibaba. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Pré-requisitos para a instalação +#### Install prerequisites - Google Cloud SDK -- Ferramenta de linha de comando Kubectl +- Kubectl command line tool - Terraform -#### Como criar um projeto no Google Cloud +#### Create a Google Cloud Project -- Clone ou navegue ao [repositório de Indexador](https://github.com/graphprotocol/indexer). +- Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). -- Navegue ao diretório `./terraform`, é aqui onde todos os comandos devem ser executados. +- Navigate to the `./terraform` directory, this is where all commands should be executed. ```sh cd terraform ``` -- Autentique como Google Cloud e crie um novo projeto. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Use a página de cobrança do Google Cloud Console para configurar a cobrança para o novo projeto. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Crie uma configuração no Google Cloud. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Ligue as APIs requeridas do Google Cloud. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Crie uma conta de serviço. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Ligue o peering entre o banco de dados e o cluster Kubernetes, que será criado no próximo passo. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,35 +249,35 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Crie o arquivo de configuração mínimo no terraform (atualize quando necessário). +- Create minimal terraform configuration file (update as needed). 
```sh indexer= cat > terraform.tfvars < \ -f Dockerfile.indexer-service \ -t indexer-service:latest \ -# Agente indexador +# Indexer agent docker build \ --build-arg NPM_TOKEN= \ -f Dockerfile.indexer-agent \ -t indexer-agent:latest \ ``` -- Execute os componentes +- Run the components ```sh docker run -p 7600:7600 -it indexer-service:latest ... docker run -p 18000:8000 -it indexer-agent:latest ... ``` -**NOTA**: Após iniciar os containers, o serviço Indexador deve ser acessível no [http://localhost:7600](http://localhost:7600) e o agente indexador deve expor a API de gestão de Indexador no [http://localhost:18000/](http://localhost:18000/). +**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). -#### Usando K8s e Terraform +#### Using K8s and Terraform -Veja a seção sobre [preparar infraestruturas de servidor usando o Terraform no Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) +See the [Setup Server Infrastructure Using Terraform on Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) section -#### Uso +#### Usage -> **NOTA**: Todas as variáveis de configuração de runtime (tempo de execução) podem ser aplicadas como parâmetros ao comando na inicialização, ou usando variáveis de ambiente do formato `COMPONENT_NAME_VARIABLE_NAME`(por ex. `INDEXER_AGENT_ETHEREUM`). +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). -#### Agente Indexador +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Serviço Indexador +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -516,56 +516,56 @@ graph-indexer-service start \ #### Indexer CLI -O Indexer CLI é um plugin para o [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli), acessível no terminal em `graph indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Gestão de Indexador usando o Indexer CLI +#### Indexer management using Indexer CLI -O programa recomendado para interagir com a **API de Gestão de Indexador** é o **Indexer CLI**, uma extensão ao **Graph CLI**. O agente precisa de comandos de um Indexador para poder interagir de forma autônoma com a rede em nome do Indexer. Os mecanismos que definem o comportamento de um agente indexador são **gestão de alocações** e **regras de indexamento**. No modo automático, um Indexador pode usar **regras de indexamento** para aplicar estratégias específicas para a escolha de subgraphs para indexar e servir consultas. Regras são gerenciadas através de uma API GraphQL servida pelo agente, e conhecida como a API de Gestão de Indexador. No modo manual, um Indexador pode criar ações de alocação usando a **fila de ações**, além de aprová-las explicitamente antes de serem executadas. Sob o modo de supervisão, as **regras de indexação** são usadas para popular a **fila de ações** e também exigem aprovação explícita para executar. 
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Uso +#### Usage -O **Indexer CLI** se conecta ao agente indexador, normalmente através do redirecionamento de portas, para que a CLI não precise ser executada no mesmo servidor ou cluster. Para começar mais facilmente, e para fins de contexto, a CLI será descrita brevemente aqui. +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `graph indexer connect ` - Conecta à API de gestão de Indexador. Tipicamente, a conexão ao servidor é aberta através do redirecionamento de portas, para que a CLI possa ser operada remotamente com facilidade. (Exemplo: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Mostra uma ou mais regras de indexação usando `all` como o `` para mostrar todas as regras, ou `global` para exibit os padrões globais. Um argumento adicional `--merged` pode ser usado para especificar que regras, específicas ao lançamento, estão fundidas com a regra global. É assim que elas são aplicadas no agente de Indexador. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - Configura uma ou mais regras de indexação. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Começa a indexar um lançamento de subgraph, se disponível, e configura a sua `decisionBasis` para `always`, para que o agente indexador sempre escolha indexá-lo. Caso a regra global seja configurada para always, todos os subgraphs disponíveis na rede serão indexados. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. 
-- `graph indexer rules stop [options] ` — Para de indexar um lançamento e configura a sua `decisionBasis` em `never`, com o fim de pular este lançamento ao decidir quais lançamentos indexar. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` — Configura a `decisionBasis` de um lançamento para obedecer o `rules`, comandando o agente indexador a usar regras de indexação para decidir se este lançamento será ou não indexado. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. -- `graph indexer action queue allocate ` - Fila da ação de alocação +- `graph indexer action queue allocate ` - Queue allocation action -- `graph indexer action queue reallocate ` - Fila de realocação de ação +- `graph indexer action queue reallocate ` - Queue reallocate action -- `graph indexer action queue unallocate ` - Desaloca ação na fila +- `graph indexer action queue unallocate ` - Queue unallocate action -- `graph indexer actions cancel [ ...]` — Cancela todas as ações na fila se a id não for especificada, caso contrário, cancela o arranjo de id com espaço como separador +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator -- `graph indexer actions approve [ ...]` - Aprova múltiplas ações para execução +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution -- `graph indexer actions execute approve` — Força o trabalhador a executar ações aprovadas imediatamente +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -Todos os comandos que mostram regras no resultado podem escolher entre os formatos de resultado (`table`, `yaml`, e `json`) usando o argumento `-output`. +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### Regras de indexação +#### Indexing rules -As regras de indexação podem ser aplicadas como padrões globais ou para lançamentos de subgraph específicos com o uso das suas IDs. Os campos `deployment` e `decisionBasis` são obrigatórios, enquanto todos os outros campos são opcionais. Quando uma regra de indexação tem `rules` como a `decisionBasis`, então o agente de Indexador comparará valores de limiar não-nulos naquela regra com valores retirados da rede para o lançamento correspondente. Se o lançamento do subgraph tiver valores acima (ou abaixo) de todos os limiares, ele será escolhido para a indexação. +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. 
If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -Por exemplo: se a regra global tem um `minStake` de **5** (GRT), qualquer lançamento de subgraph que tiver mais de 5 (GRT) de stake alocado nele será indexado. Regras de limiar incluem `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, e `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. -Modelo de dados: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -Exemplos de uso de regra de indexação: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### CLI de fila de ações +#### Actions queue CLI -O indexer-cli providencia um módulo `actions` para trabalhar manualmente com a fila de ações. Ele interage com a fila de ações através do **API GraphQL** hospedado pelo servidor de gestão de indexador. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -O trabalhador de execução de ações só retirará itens da fila para execução se eles tiverem o `ActionStatus = approved`. No caminho recomendado, as ações são adicionadas à fila com `ActionStatus = queued`, para que sejam depois aprovadas para serem executadas on-chain. O fluxo geral parecerá com isto: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Ação adicionada à fila por ferramenta de otimização de terceiros ou utilizador do indexer-cli -- O Indexador pode usar o `indexer-cli` para visualizar todas as ações enfileiradas -- O Indexador (ou outro software) pode aprovar ou cancelar ações na fila usando o `indexer-cli`. Os comandos de aprovação e cancelamento aceitam um arranjo de Ids de ação como comando. -- O trabalhador de execução consulta a fila regularmente para ações aprovadas. Ele tomará as ações `approved` da fila, tentará executá-las, e atualizará os valores no banco de dados a depender do estado da execução, sendo `success` ou `failed`. -- Se uma ação tiver êxito, o trabalhador garantirá a presença de uma regra de indexação que diga ao agente como administrar a alocação dali em diante, por mais conveniência ao tomar ações manuais enquanto o agente está no modo `auto` ou `oversight`. -- O indexador pode monitorizar a fila de ações para ver um histórico de execuções de ação, e se necessário, aprovar novamente e atualizar itens de ação caso a sua execução falhe. A fila de ações provém um histórico de todas as ações agendadas e tomadas. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. 
The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. -Modelo de dados: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Exemplo de uso da fonte: +Example usage from source: ```bash graph indexer actions get all @@ -677,142 +677,141 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Note que os tipos apoiados de ações para gestão de alocação têm requisitos diferentes de entrada: +Note that supported action types for allocation management have different input requirements: -- `Allocate` — aloca stakes a um lançamento de subgraph específico +- `Allocate` - allocate stake to a specific subgraph deployment - - parâmetros de ação exigidos: + - required action params: - deploymentID - amount -- `Unallocate` — fecha uma alocação, liberando o stake para ser realocado em outro lugar +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - parâmetros de ação exigidos: + - required action params: - allocationID - deploymentID - - parâmetros de ação opcionais: + - optional action params: - poi - - force (força o uso do POI providenciado, mesmo se ele não corresponder ao providenciado pelo graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` — fecha a alocação automaticamente e abre uma alocação nova para o mesmo lançamento de subgraph +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - parâmetros de ação exigidos: + - required action params: - allocationID - deploymentID - amount - - parâmetros de ação opcionais: + - optional action params: - poi - - force (força o uso do POI providenciado mesmo se ele não corresponder ao que o graph-node providencia) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Modelos de custo +#### Cost models -Modelos de custo servem preços dinâmicos para queries, com base em atributos de mercado e query. O Serviço de Indexador compartilha um modelo de custo com os gateways para cada subgraph, aos quais ele pretende responder a consultas. Os gateways, por sua vez, usam o modelo de custo para decidir seleções de Indexador por query e para negociar pagamentos com Indexadores escolhidos. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -A linguagem Agora providencia um formato flexível para a declaração de modelos de custo para queries. 
Um modelo de preço do Agora é uma sequência de declarações, executadas em ordem, para cada query de alto-nível em um query no GraphQL. Para cada query de nível máximo, a primeira declaração correspondente determina o preço para o tal query. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -Uma declaração consiste de um predicado, que é usado para corresponder a buscas GraphQL; e uma expressão de custo que, quando avaliada, mostra um custo em GRT decimal. Valores na posição de argumento nomeada em um query podem ser capturados no predicado e usados na expressão. Globais também podem ser configurados e substituídos por valores temporários em uma expressão. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Exemplo de modelo de custo: +Example cost model: ``` -# Esta declaração captura o valor de pulo, -# usa uma expressão boolean no predicado para corresponder a consultas específicas que usam 'skip' -# e uma expressão de custo para calcular o custo baseado no valor 'skip' e no global SYSTEM_LOAD -SYSTEM_LOAD global +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; -# Este padrão corresponderá a qualquer expressão GraphQL. -# Ele usa um Global substituído na expressão para calcular o custo +# This default will match any GraphQL expression. +# It uses a Global substituted into the expression to calculate cost default => 0.1 * $SYSTEM_LOAD; ``` -Exemplo de custo de query usando o modelo acima: +Example query costing using the above model: -| Consulta | Preço | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Aplicação do modelo de custo +#### Applying the cost model -Os modelos de custo são aplicados através do Indexer CLI, que os repassa à API de Gestão do agente de Indexador para armazenamento no banco de dados. O Serviço de Indexador depois irá localizar e servir os modelos de custo para gateways, sempre que eles forem requisitados. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. 
```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interações com a rede +## Interacting with the network -### Stake no protocolo +### Stake in the protocol -Os primeiros passos para participar como Indexador consistem em aprovar o protocolo, fazer staking de fundos, e (opcionalmente) preparar um endereço de operador para interações ordinárias do protocolo. +The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. -> Nota: Para os propósitos destas instruções, o Remix será usado para interação com contratos, mas é possível escolher a sua própria ferramenta ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/) e [MyCrypto](https://www.mycrypto.com/account) são algumas outras ferramentas conhecidas). +> Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -Quando um Indexador faz stake de GRT no protocolo, será possível iniciar os seus [componentes](/indexing/overview/#indexer-components) e começar as suas interações com a rede. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Aprovação de tokens +#### Approve tokens -1. Abra o [app Remix](https://remix.ethereum.org/) em um navegador +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. No `File Explorer`, crie um arquivo chamado **GraphToken.abi** com a [Token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). -3. Com o `Staking.abi` selecionado e aberto no editor, entre na seção com `Deploy and Run Transactions` na interface do Remix. +3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. Na opção de ambiente `Injected Web3`, e sob `Account`, selecione o seu endereço de Indexador. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Configure o endereço de contrato de GraphToken; cole o endereço de contrato do GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) próximo ao `At Address` e clique no botão `At address` para aplicar. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Chame a função `approve(spender, amount)` para aprovar o contrato de Staking. Preencha a lacuna `spender`, que tem o endereço de contrato de Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`), e a `amount` com os tokens a serem colocados (em wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Staking de tokens +#### Stake tokens -1. 
Abra o [app Remix](https://remix.ethereum.org/) em um navegador
+1. Open the [Remix app](https://remix.ethereum.org/) in a browser

-2. No `File Explorer`, crie um arquivo chamado **Staking.abi** com a ABI de staking.
+2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI.

-3. Com o `Staking.abi` selecionado e aberto no editor, entre na seção com `Deploy and Run Transactions` na interface do Remix.
+3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface.

-4. Na opção de ambiente `Injected Web3`, e sob `Account`, selecione o seu endereço de Indexador.
+4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

-5. Configure o endereço de contrato de Staking; cole o endereço de contrato do Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) próximo ao `At Address` e clique no botão `At address` para aplicar.
+5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply.

-6. Chame o `stake()` para fazer stake de GRT no protocolo.
+6. Call `stake()` to stake GRT in the protocol.

-7. (Opcional) Indexadores podem aprovar outro endereço para operar sua infraestrutura de Indexador, a fim de poder separar as chaves que controlam os fundos daquelas que realizam ações rotineiras, como alocar em subgraphs e servir queries (pagos). Para configurar o operador, chame o `setOperator()` com o endereço do operador.
+7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address.

-8. (Opcional) Para controlar a distribuição de recompensas e atrair Delegantes estrategicamente, os Indexadores podem atualizar os seus parâmetros de delegação atualizando o seu indexingRewardCut (partes por milhão); queryFeeCut (partes por milhão); e cooldownBlocks (número de blocos). Para fazer isto, chame o `setDelegationParameters()`. O seguinte exemplo configura o queryFeeCut para distribuir 95% de rebates de query ao Indexador e 5% aos Delegantes; configura o indexingRewardCutto para distribuir 60% de recompensas de indexação ao Indexador e 40% aos Delegantes; e configura o período do `thecooldownBlocks` para 500 blocos.
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.
``` setDelegationParameters(950000, 600000, 500) ``` -### Configuração de parâmetros de delegação +### Setting delegation parameters -A função `setDelegationParameters()` no [contrato de staking](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol) é essencial para Indexadores; esta permite configurar parâmetros que definem as suas interações com Delegantes, o que influencia a sua capacidade de delegação e divisa de recompensas. +The `setDelegationParameters()` function in the [staking contract](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol) is essential for Indexers, allowing them to set parameters that define their interactions with Delegators, influencing their reward sharing and delegation capacity. -### Como configurar parâmetros de delegação +### How to set delegation parameters -Para configurar os parâmetros de delegação com a interface do Graph Explorer, siga os seguintes passos: +To set the delegation parameters using Graph Explorer interface, follow these steps: -1. Navegue para o [Graph Explorer](https://thegraph.com/explorer/). -2. Conecte a sua carteira. Escolha a multisig (por ex., Gnosis Safe) e depois selecione a mainnet. Nota: Será necessário repetir este processo para o Arbitrum One. -3. Conecte a sua carteira como signatário. -4. Navegue até a seção 'Settings' (Configurações) e selecione 'Delegation Parameters' (Parâmetros de Delegação). Estes parâmetros devem ser configurados para alcançar uma parte efetiva dentro do alcance desejado. Após preencher os campos com valores, a interface calculará automaticamente a parte efetiva. Ajuste estes valores como necessário para obter a percentagem de parte efetiva desejada. -5. Envie a transação à rede. +1. Navigate to [Graph Explorer](https://thegraph.com/explorer/). +2. Connect your wallet. Choose multisig (such as Gnosis Safe) and then select mainnet. Note: You will need to repeat this process for Arbitrum One. +3. Connect the wallet you have as a signer. +4. Navigate to the 'Settings' section and select 'Delegation Parameters'. These parameters should be configured to achieve an effective cut within the desired range. Upon entering values in the provided input fields, the interface will automatically calculate the effective cut. Adjust these values as necessary to attain the desired effective cut percentage. +5. Submit the transaction to the network. -> Nota: Esta transação deverá ser confirmada pelos signatários da carteira multisig. +> Note: This transaction will need to be confirmed by the multisig wallet signers. -### A vida de uma alocação +### The life of an allocation After being created by an Indexer a healthy allocation goes through two states. -- **Ativa** - Quando uma alocação é criada on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)), ela é considerada **ativa**. Uma porção do stake próprio e/ou delegado do Indexador é alocada a um lançamento de subgraph, que lhe permite resgatar recompensas de indexação e servir queries para aquele lançamento de subgraph. O agente indexador cria alocações baseada nas regras do Indexador. +- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. 
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

-- **Fechada** - Um Indexador pode fechar uma alocação após a passagem de um epoch ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)), ou o seu agente indexador a fechará automaticamente após o **maxAllocationEpochs** (atualmente, 28 dias). Quando uma alocação é fechada com uma prova de indexação válida (POI), as suas recompensas de indexação são distribuídas ao Indexador e aos seus Delegantes ([aprenda mais](/indexing/overview/#how-are-indexing-rewards-distributed)).
+- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-É ideal que os Indexadores utilizem a funcionalidade de sincronização off-chain para sincronizar lançamentos de subgraph à chainhead antes de criar a alocação on-chain. Esta ferramenta é mais útil para subgraphs que demorem mais de 28 epochs para sincronizar, ou que têm chances de falhar não-deterministicamente.
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing non-deterministically.

From 607ffec7afddbbda92a1c95699ca35a65a475998 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:44:46 -0500
Subject: [PATCH 0081/1534] New translations overview.mdx (Russian)

---
 website/src/pages/ru/indexing/overview.mdx | 404 ++++++++++-----------
 1 file changed, 202 insertions(+), 202 deletions(-)

diff --git a/website/src/pages/ru/indexing/overview.mdx b/website/src/pages/ru/indexing/overview.mdx
index 117364f39c6c..837c32976c04 100644
--- a/website/src/pages/ru/indexing/overview.mdx
+++ b/website/src/pages/ru/indexing/overview.mdx
@@ -1,44 +1,45 @@
 ---
-title: Индексирование
+title: Обзор индексирования
+sidebarTitle: Обзор
 ---

-Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function.
+Индексаторы — это операторы нод в сети The Graph, которые стейкают токены Graph (GRT) для предоставления услуг индексирования и обработки запросов. Индексаторы получают оплату за запросы и вознаграждение за свои услуги индексирования. Они также получают комиссию за запросы, которая возвращаются в соответствии с экспоненциальной функцией возврата. Токены GRT, которые застейканы в протоколе, подлежат периоду "оттаивания" и могут быть срезаны, если индексаторы являются вредоносными и передают неверные данные приложениям или если они некорректно осуществляют индексирование.
Индексаторы также получают вознаграждение за делегированный стейк от делегаторов, внося свой вклад в работу сети. Индексаторы выбирают подграфы для индексирования на основе сигналов от кураторов, в которых кураторы стейкают токены GRT, чтобы обозначить, какие подграфы являются высококачественными и заслуживают приоритетного внимания. Потребители (к примеру, приложения) также могут задавать параметры, по которым индексаторы обрабатывают запросы к их подграфам, и устанавливать предпочтения по цене за запрос. -## Часто задаваемые вопросы +## FAQ -### Какова минимальная величина стейка, требуемая для того, чтобы быть индексатором в сети? +### What is the minimum stake required to be an Indexer on the network? -Минимальная величина стейка для индексатора в настоящее время установлена ​​на уровне 100 000 GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### Каковы источники доходов индексатора? +### What are the revenue streams for an Indexer? -**Query fee rebates** - платежи за обслуживание запросов в сети. Эти платежи осуществляются через state каналы между индексатором и межсетевым шлюзом. Каждый запрос от шлюза содержит платеж, а ответ - доказательство достоверности результата запроса. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexing rewards**. Вознаграждения за индексирование генерируются за счет 3% годовой инфляции в рамках всего протокола и распределяются между индексаторами, индексирующими развертывание подграфов в сети. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### Как распределяются вознаграждения за индексацию? +### How are indexing rewards distributed? -Вознаграждения за индексацию поступают от инфляции протокола, которая установлена на 3% в год. Оно распределяется между подграфами в зависимости от соотношения всех сигналов на каждом из них, а затем пропорционально распределяется между индексаторами в зависимости от их выделенного стейка на этом подграфе. **Чтобы получить право на вознаграждение, распределение должно быть закрыто достоверным доказательством индексации (POI), соответствующим стандартам, установленным arbitration charter.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### Что такое подтверждение индексации (proof of indexing - POI)? +### What is a proof of indexing (POI)? 
-POI (подтверждение индексации) используются в сети для проверки того, индексирует ли индексатор назначенные им подграфы. При закрытии распределения необходимо предоставить POI для первого блока текущей эпохи, чтобы это распределение имело право на индексацию наград. POI для блока — это дайджест для всех транзакций хранилища объектов для данного развертывания подграфа до этого блока включительно. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### Когда распределяются вознаграждения за индексацию? +### When are indexing rewards distributed? -Награды за распределения постоянно накапливаются, пока они активны и распределяются в течение 28 эпох. Награды собираются и распределяются индексаторами после того, как их распределение закрыто. Это делается либо вручную, если индексатор выбирает принудительное закрытие, либо через 28 эпох делегатор может закрыть выделение для индексатора, но это не приводит к вознаграждению. 28 эпох — это максимальное время жизни распределения (сейчас одна эпоха длится ~ 24 часа). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### Каким образом можно отслеживать ожидаемые вознаграждения за индексацию? +### Can pending indexing rewards be monitored? The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -Многие панели мониторинга, созданные сообществом, содержат ожидающие значения вознаграждений, и их можно легко проверить вручную, выполнив следующие действия: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: @@ -56,129 +57,128 @@ query indexerAllocations { } ``` -Используйте Etherscan для вызова `getRewards()`: +Use Etherscan to call `getRewards()`: -- Перейдите в [интерфейсе Etherscan к контракту Rewards](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* Чтобы вызвать `getRewards()`: +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - - Введите во входные данные **allocationID**. - - Нажмите кнопку **Query**. + - Enter the **allocationID** in the input. + - Click the **Query** button. -### Что такое споры и где их можно просмотреть? +### What are disputes and where can I view them? 
-Запросы и распределения индексатора могут быть оспорены на The Graph в течение периода спора. Срок спора варьируется в зависимости от типа спора. Запросы/аттестации имеют 7 эпох спорного окна, тогда как распределения имеют 56 эпох. По истечении этих периодов споры не могут быть открыты ни в отношении выделений, ни в отношении запросов. При открытии спора от Рыбаков требуется депозит в размере не менее 10 000 GRT, который будет заблокирован до завершения спора и принятия решения. Рыбаки — это любые участники сети, которые открывают споры.
+An Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have a 7-epoch dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants that open disputes.

-У споров есть **три** возможных исхода, как и у депозита Рыбаков.
+Disputes have **three** possible outcomes, as does the deposit of the Fishermen.

-- Если спор отклонен, то GRT, внесенные рыбаками, будут сожжены, а оспариваемый индексатор не будет урезан.
-- Если спор будет решен в виде ничьей, депозит рыбака будет возвращен, а спорный индексатор не будет урезан.
-- Если спор будет принят, GRT, внесенные рыбаками, будут возвращены, спорный индексатор будет урезан, а рыбаки получат 50% от урезанных GRT.
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed.
+- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT.

-Споры можно просмотреть в пользовательском интерфейсе на странице профиля индексатора на вкладке `Disputes`.
+Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab.

-### Что такое query fee rebates и когда они распределяются?
+### What are query fee rebates and when are they distributed?

-Плата за запрос взимается шлюзом и распределяется между индексаторами в соответствии с экспоненциальной функцией скидки (см. GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). Экспоненциальная функция скидки предлагается как способ гарантии, что индексаторы достигают наилучшего результата за счет добросовестного обслуживания запросов. Это работает, стимулируя индексаторов выделять большую сумму ставки (которая может быть уменьшена за ошибку при обслуживании запроса) относительно суммы комиссии за запрос, которую они могут собрать.
+Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect.
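+To make the incentive concrete, the sketch below assumes a rebate curve of the general form fees * (1 - e^(-stake/fees)); the exact function and its parameters are the ones specified in GIP-0051, and the `fees` and `stake` values here are purely hypothetical:
+
+```bash
+# Hypothetical GRT amounts, for illustration only
+fees=100
+stake=10000
+# With this shape the rebate approaches the collected fees as allocated stake grows
+awk -v f="$fees" -v s="$stake" 'BEGIN { printf "rebate ~ %.4f GRT\n", f * (1 - exp(-s / f)) }'
+```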
Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### Что такое query fee cut и indexing reward cut? +### What is query fee cut and indexing reward cut? -Значения `queryFeeCut` и `indexingRewardCut` — это параметры делегирования, которые индексатор может установить вместе с cooldownBlocks для управления распределением GRT между индексатором и его делегаторами. См. последние шаги в разделе [Стейкинг в протоколе](/indexing/overview/#stake-in-the-protocol), чтобы получить инструкции по настройке параметров делегирования. +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. - **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. - **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### Как индексаторы узнают, какие подграфы индексировать? +### How do Indexers know which subgraphs to index? -Индексаторы могут отличаться друг от друга, применяя передовые методы принятия решений об индексации подграфов, но чтобы дать общее представление, мы обсудим несколько ключевых показателей, используемых для оценки подграфов в сети: +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -- **Curation signal**. Доля сигнала курирования сети, примененного к определенному подграфу, является хорошим индикатором интереса к этому подграфу, особенно на этапе начальной загрузки, когда объём запросов увеличивается. +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Query fees collected**. Исторические данные об объеме сборов за запросы, собранные для определенного подграфа, являются хорошим индикатором будущего спроса. +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **Amount staked**. Наблюдение за поведением других индексаторов или просмотр соотношения общего стейка, выделяемого на конкретные подграфы, может позволить индексатору отслеживать предложение запросов к подграфам, чтобы определить подграфы, к которым сеть проявляет доверие, либо подграфы, которые нуждаются в большем предложении. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. 
-- **Subgraphs with no indexing rewards**. Некоторые подграфы не генерируют вознаграждение за индексирование главным образом потому, что они используют неподдерживаемые функции, такие как IPFS, или потому что они запрашивают другую сеть за пределами основной сети. Вы увидите сообщение в подграфе, если он не генерирует вознаграждение за индексацию.
+- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards.

-### Каковы требования к аппаратному обеспечению?
+### What are the hardware requirements?

-- **Small** — достаточно, чтобы начать индексирование нескольких подграфов, вероятно, потребуется его расширение.
-- **Standard** — настройка по умолчанию, это то, что используется в примерах манифестов развертывания k8s/terraform.
-- **Medium** – рабочий индексатор, поддерживающий 100 подграфов и 200–500 запросов в секунду.
-- **Large** – готовность индексировать все используемые в настоящее время подграфы и обслуживать запросы на соответствующий трафик.
+- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded.
+- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests.
+- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second.
+- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic.

| Настройка | Postgres<br />(ЦП) | Postgres<br />(память в ГБ) | Postgres<br />(диск в ТБ) | VMs<br />(ЦП) | VMs<br />(память в ГБ) |
| --- | :-: | :-: | :-: | :-: | :-: |
| Small | 4 | 8 | 1 | 4 | 16 |
| Standard | 8 | 30 | 1 | 12 | 48 |
| Medium | 16 | 64 | 2 | 32 | 64 |
| Large | 72 | 468 | 3.5 | 48 | 184 |

| Setup | Postgres<br />(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs<br />(memory in GBs) |
| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: |
| Small | 4 | 8 | 1 | 4 | 16 |
| Standard | 8 | 30 | 1 | 12 | 48 |
| Medium | 16 | 64 | 2 | 32 | 64 |
| Large | 72 | 468 | 3.5 | 48 | 184 |

-### Какие основные меры безопасности следует предпринять индексатору?
+### What are some basic security precautions an Indexer should take?

-- **Operator wallet**. Настройка кошелька оператора является важной мерой безопасности, поскольку она позволяет индексатору поддерживать разделение между своими ключами, которые контролируют величину стейка, и теми, которые контролируют ежедневные операции. Инструкции см. в разделе [Stake in Protocol](/indexing/overview/#stake-in-the-protocol).
+- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions.

-- **Firewall**. Только служба индексатора должна быть общедоступной, и особое внимание следует уделить блокировке портов администратора и доступа к базе данных: эндпоинт ноды The Graph JSON-RPC (порт по умолчанию: 8030), API эндпоинт для управления индексатором (порт по умолчанию: 18000) и эндпоинт базы данных Postgres (порт по умолчанию: 5432) не должны быть доступны.
+- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed.

## Infrastructure

-В центре инфраструктуры индексатора находится нода the Graph, которая отслеживает индексированные в сети, извлекает и загружает данные в соответствии с определением подграфа и служит в качестве [GraphQL API](/about/#how-the-graph-works). The Graph Node должна быть подключена к эндпоинту, предоставляющему данные из каждой индексированной сети; к ноде IPFS для получения данных; к базе данных PostgreSQL для своего хранилища; и компонентам индексатора, облегчающим его взаимодействие с сетью.
+At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.

-- **PostgreSQL database** — основное хранилище для the Graph Node, здесь хранятся данные подграфа. Служба и агент индексатора также используют базу данных для хранения данных о каналах состояний, моделей затрат, правил индексирования и действий по распределению.
+- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.

-- **Data endpoint** – Для сетей, совместимых с EVM, Graph Node должна быть подключена к эндпоинту, предоставляющему API-интерфейс JSON-RPC, совместимый с EVM.
Это может быть как один клиент, так и более сложная конфигурация, которая распределяет нагрузку между несколькими клиентами. Важно знать, что для некоторых подграфов потребуются определенные клиентские возможности, такие как режим архива и/или API для отслеживания четности.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **IPFS node (версия ниже 5)** – Метаданные развертывания подграфа хранятся в сети IPFS. The Graph Node в первую очередь обращается к ноде IPFS во время развертывания подграфа для получения манифеста подграфа и всех связанных файлов. Индексаторам сети не нужно хостить свой собственную ноду IPFS, нода IPFS для сети находится на https://ipfs.network.thegraph.com.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

-- **Indexer service**. Обрабатывает все необходимые внешние подключения к сети. Совместно использует модели затрат и статусы индексации, передает заявки на запросы от шлюзов на Graph Node, и управляет платежами по запросам через каналы состояний с помощью шлюза.
+- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **Indexer agent**. Облегчает взаимодействие индексаторов в сети, включая регистрацию в сети, управление развертыванием подграфов на его Graph Node/ах и управление аллокациями.
+- **Indexer agent** - Facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.

-- **Prometheus metrics server**. Компоненты Graph Node и индексатора регистрируют свои показатели на сервере данных.
+- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

-Примечание: Для поддержки динамичного масштабирования рекомендуется разделить задачи запросов и индексирования между разными наборами нод: нодами запросов и нодами индексирования.
+Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.

-### Обзор портов
+### Ports overview

-> **Важно**. Будьте осторожны, открывая порты для общего доступа — **порты администрирования** должны быть заблокированы. Это касается Graph Node JSON-RPC и эндпоинтов для управления индексатором, описанных ниже.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.

#### Graph Node

| Порт | Назначение | Расположение | CLI-аргумент | Переменная среды |
| --- | --- | --- | --- | --- |
| 8000 | HTTP-сервер GraphQL<br />(для запросов подграфов) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
| 8001 | GraphQL WS<br />(для подписок на подграфы) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
| 8020 | JSON-RPC<br />(для управления процессом развертывания) | / | --admin-port | - |
| 8030 | API для определения статуса индексирования подграфов | /graphql | --index-node-port | - |
| 8040 | Показатели Prometheus | /metrics | --metrics-port | - |

| Port | Purpose | Routes | CLI Argument | Environment Variable |
| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
| 8020 | JSON-RPC<br />(for managing deployments) | / | \--admin-port | - |
| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

#### Indexer Service

| Порт | Назначение | Расположение | CLI-аргумент | Переменная среды |
| --- | --- | --- | --- | --- |
| 7600 | HTTP-сервер GraphQL<br />(для платных запросов к подграфам) | /subgraphs/id/...<br />/status<br />/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` |
| 7300 | Показатели Prometheus | /metrics | --metrics-port | - |

| Port | Purpose | Routes | CLI Argument | Environment Variable |
| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- |
| 7600 | GraphQL HTTP server<br />(for paid subgraph queries) | /subgraphs/id/...<br />/status<br />/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` |
| 7300 | Prometheus metrics | /metrics | \--metrics-port | - |

#### Indexer Agent

| Порт | Назначение | Расположение | CLI-аргумент | Переменная среды |
| --- | --- | --- | --- | --- |
| 8000 | API для управления индексатором | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` |

| Port | Purpose | Routes | CLI Argument | Environment Variable |
| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- |
| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` |

-### Настройка серверной инфраструктуры с помощью Terraform в Google Cloud
+### Setup server infrastructure using Terraform on Google Cloud

-> Примечание: Индексаторы могут в качестве альтернативы использовать AWS, Microsoft Azure или Alibaba.
+> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba.

-#### Установка предварительного обеспечения
+#### Install prerequisites

- Google Cloud SDK
-- Инструмент командной строки Kubectl
+- Kubectl command line tool
- Terraform

-#### Создайте проект Google Cloud
+#### Create a Google Cloud Project

- Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer).

```sh
cd terraform
```

-- Авторизуйтесь в Google Cloud и создайте новый проект.
+- Authenticate with Google Cloud and create a new project.

```sh
gcloud auth login
project=
gcloud projects create --enable-cloud-apis $project
```

-- Используйте страницу биллинга в Google Cloud Console, чтобы включить эту функцию для нового проекта.
+- Use the Google Cloud Console's billing page to enable billing for the new project.

-- Создайте конфигурацию Google Cloud.
+- Create a Google Cloud configuration.

```sh
proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project")
gcloud config configurations create $project
gcloud config set project "$proj_id"
gcloud config set compute/region us-central1
gcloud config set compute/zone us-central1-a
```

-- Включите необходимые API-интерфейсы Google Cloud.
+- Enable required Google Cloud APIs.

```sh
gcloud services enable compute.googleapis.com
gcloud services enable container.googleapis.com
gcloud services enable servicenetworking.googleapis.com
gcloud services enable sqladmin.googleapis.com
```

-- Создайте сервисный аккаунт.
+- Create a service account.

```sh
svc_name=
gcloud iam service-accounts create $svc_name \
 --description="Service account for Terraform" \
 --display-name="$svc_name"
gcloud iam service-accounts list
# Get the email of the service account from the list
svc=$(gcloud iam service-accounts list --format='get(email)'
--filter="displayName=$svc_name")
gcloud iam service-accounts keys create .gcloud.json --iam-account $svc
key=$(cat .gcloud.json | jq -r '.private_key')
gcloud projects add-iam-policy-binding $proj_id \
 --member serviceAccount:$svc \
 --role roles/editor
```

-- Активируйте peering между базой данных и кластером Kubernetes, который будет создан на следующем шаге.
+- Enable peering between database and Kubernetes cluster that will be created in the next step.

```sh
gcloud compute addresses create google-managed-services-default \
 --prefix-length=20 \
 --purpose=VPC_PEERING \
 --network default \
 --global \
 --description 'IP Range for peer networks.'
gcloud services vpc-peerings connect \
 --network=default \
 --ranges=google-managed-services-default
```

-- Создайте минимальный файл конфигурации terraform (обновляйте его по мере необходимости).
+- Create minimal terraform configuration file (update as needed).

```sh
indexer=
cat > terraform.tfvars <
```

-> **ПРИМЕЧАНИЕ**. Все переменные конфигурации среды выполнения могут применяться либо в качестве параметров команды при запуске, либо с использованием переменных среды в формате `COMPONENT_NAME_VARIABLE_NAME` (например, `INDEXER_AGENT_ETHEREUM`).
+> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME` (ex. `INDEXER_AGENT_ETHEREUM`).

#### Indexer agent

@@ -516,56 +516,56 @@ graph-indexer-service start \

#### Indexer CLI

-Интерфейс командной строки индексатора — это плагин для [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli), доступный в терминале `graph indexer`.
+The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`.

```sh
graph indexer connect http://localhost:18000
graph indexer status
```

-#### Управление индексатором с помощью интерфейса командной строки индексатора
+#### Indexer management using Indexer CLI

-Рекомендуемым инструментом для взаимодействия с **Indexer Management API** является **Indexer CLI**, расширение для **Graph CLI**. Indexer agent требуются данные от индексатора для автономного взаимодействия с сетью от имени индексатора. Механизмом определения поведения агента индексатора являются режим **allocation management** и **indexing rules**. В автоматическом режиме индексатор может использовать **indexing rules**, чтобы применить свою стратегию выбора подграфов для индексации и обслуживания запросов. Правила управляются через API GraphQL, обслуживаемый агентом и известный как API управления индексатором (Indexer Management API). В ручном режиме индексатор может создавать действия по распределению аллокаций, используя **actions queue** непосредственно утверждать их до того, как они будут выполнены. В режиме контроля **indexing rules** используются для заполнения **actions queue** и также требуют непосредственного утверждения их до того, как они будут выполнены.
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanisms for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution.

-#### Применение
+#### Usage

-**Indexer CLI** подключается к Indexer agent, как правило, посредством переадресации портов, поэтому CLI не нужно запускать на том же сервере или кластере. Ниже приведено краткое описание интерфейса командной строки.
+The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here.

-- `graph indexer connect ` — подключение к API для управления индексатором. Обычно соединение с сервером открывается через переадресацию портов, поэтому интерфейсом командной строки можно легко управлять удаленно. (Например: `kubectl port-forward pod/ 8000:8000`)
+- `graph indexer connect ` - Connect to the Indexer management API.
Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` — получить одно или несколько indexing rules, используя `all` в качестве ``, чтобы получить все правила, или `global` для получения глобальных значений по умолчанию. Дополнительный аргумент `--merged` может использоваться для указания того, что правила, специфичные для развертывания, объединяются с глобальным правилом. Вот как они применяются в Indexer agent. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` — установите одно или несколько правил индексации. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` – начать индексирование развертывания подграфа, если оно доступно, и установить для его `decisionBasis` значение `always`, в результате Indexer agent будет всегда его индексировать. Если для глобального правила установлено значение «always», то будут проиндексированы все доступные подграфы в сети. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` – остановите индексирование развертывания и задайте для его `decisionBasis` значение «never», в результате оно будет пропускать это развертывание при принятии решения об индексации развертывания. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` — установите для развертывания `decisionBasis` значение `rules`, чтобы Indexer agent смог применить правила индексирования с целью решить, следует ли индексировать это развертывание. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. 
-- `graph indexer action queue allocate ` – действие по распределению аллокаций в виде очереди
+- `graph indexer action queue allocate ` - Queue allocation action

-- `graph indexer action queue reallocate ` — действие по перераспределению аллокаций в виде очереди
+- `graph indexer action queue reallocate ` - Queue reallocate action

-- `graph indexer action queue unallocate ` — действие по отмене распределения аллокации в виде очереди
+- `graph indexer action queue unallocate ` - Queue unallocate action

-- `graph indexer actions cancel [ ...]` - Отменить все действия в очереди, если id не указан, иначе отменить array of id с пробелом в качестве разделителя
+- `graph indexer actions cancel [ ...]` - Cancel all actions in the queue if id is unspecified, otherwise cancel an array of ids with space as separator

-- `graph indexer actions approve [ ...]` - Одобрить выполнение нескольких действий
+- `graph indexer actions approve [ ...]` - Approve multiple actions for execution

-- `graph indexer actions execute approve` — Заставить сотрудника немедленно выполнить утвержденные действия
+- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately

-Все команды, которые отображают правила в выходных данных, могут выбирать между поддерживаемыми выходными форматами (`table`, `yaml` и `json`) с помощью аргумента `-output`.
+All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument.

#### Indexing rules

-Правила индексирования могут применяться либо как глобальные значения по умолчанию, либо для определенных развертываний подграфов с использованием их идентификаторов. Поля `deployment` и `decisionBasis` обязательны для заполнения, а все остальные поля — необязательны. Если indexing rule содержит `rules` в качестве `decisionBasis`, Indexer agent сравнивает ненулевые пороговые значения в этом правиле со значениями, полученными из сети для соответствующего развертывания. Если значение развертывания подграфа выше (или ниже) любого из пороговых значений, оно будет выбрано для индексации.
+Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing.

-Например, если глобальное правило имеет `minStake` **5** (GRT), любое развертывание подграфа с выделенной ему суммой стейкинга более 5 (GRT) будет проиндексировано. Пороговые правила включают `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake` и `minAverageQueryFees`.
+For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`.
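+As a sketch of how such a threshold could be applied (assuming `global` is accepted as the rule key, as with `rules get` above, and that fields are passed as name/value pairs; check `graph indexer rules set --help` for the exact syntax):
+
+```bash
+# Hypothetical: fall back to rule-based decisions and index any deployment
+# with more than 5 GRT of allocated stake
+graph indexer rules set global decisionBasis rules minStake 5
+```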
-Модель данных: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -Пример применения правила индексации: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### Очередь действий в CLI +#### Actions queue CLI -indexer-cli предоставляет модуль `actions` для ручного управления очередностью действий. Для взаимодействия с очередностью действий он использует **Graphql API**, размещенный на сервере управления индексатором. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -Обработчик выполнения действий будет извлекать элементы из очереди для выполнения только в том случае, если они имеют `ActionStatus = Approved`. В рекомендуемом пути действия добавляются в очередь с ActionStatus = queued, поэтому они должны быть одобрены для выполнения в сети. Общий поток будет выглядеть следующим образом: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Действие, добавленное в очередь сторонним инструментом оптимизатора или пользователем indexer-cli -- Индексатор может использовать `indexer-cli` для просмотра всех действий, поставленных в очередь -- Индексатор (или другое программное обеспечение) может утверждать или отменять действия в очереди, используя `indexer-cli`. Команды утверждения и отмены принимают на входе массив идентификаторов действий. -- Исполнитель регулярно проверяет очередь на наличие утвержденных действий. Он берёт `approved` действия из очереди, пытается их выполнить и обновляет значения в базе данных в зависимости от статуса выполнения на `success` или `failed`. -- Если действие выполнено успешно, рабочий процесс обеспечит наличие правила индексации, которое сообщает агенту, как управлять распределением в будущем, что полезно при выполнении ручного управления, когда агент находится в режиме `auto` или `oversight`. -- Индексатор может отслеживать очередность действий, просматривая историю выполнения действий и, при необходимости, повторно утвердить и обновить элементы действий, если они не выполнились. Очередность действий содержит историю всех действий, поставленных в очередь и выполненных. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. 
+- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. -Модель данных: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Пример использования из исходного кода: +Example usage from source: ```bash graph indexer actions get all @@ -677,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Обратите внимание, что поддерживаемые типы действий для управления распределением имеют разные входные требования: +Note that supported action types for allocation management have different input requirements: -- `Allocate` — выделение определенной величины стейкинга для развертывания определённого подграфа +- `Allocate` - allocate stake to a specific subgraph deployment - - Требуемые параметры действия: + - required action params: - deploymentID - amount -- `Unallocate` — закрыть распределение, освобождая величину стейкинга для перераспределения в другом месте +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - Требуемые параметры действия: + - required action params: - allocationID - deploymentID - - Опциональные параметры действия: + - optional action params: - poi - - force (принудительно использует предоставленный POI, даже если он не соответствует тому, что предоставляет graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` — немедленно закрыть распределение и открыть новое распределение для развертывания того же подграфа +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - Требуемые параметры действия: + - required action params: - allocationID - deploymentID - amount - - Опциональные параметры действия: + - optional action params: - poi - - force (принудительно использует предоставленный POI, даже если он не соответствует тому, что предоставляет graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Модели затрат +#### Cost models -Модели затрат обеспечивают динамическое ценообразование для запросов на основе рынка и атрибутов запроса. Служба индексатора делится со шлюзом моделью стоимости для каждого подграфа, для которого они намереваются отвечать на запросы. Шлюзы, в свою очередь, используют модель затрат для принятия решений о выборе индексатора для каждого запроса и согласования оплаты с выбранными индексаторами. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Язык Agora предоставляет гибкий формат для объявления моделей затрат на запросы. Ценовая модель Agora — это последовательность операторов, которые выполняются по порядку для каждого запроса верхнего уровня в запросе GraphQL. Для каждого запроса верхнего уровня цена за этот запрос определяется первым соответствующим ему оператором. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. 
For each top-level query, the first statement which matches it determines the price for that query. -Оператор состоит из predicate, который используется для сопоставления запросов GraphQL, и выражения стоимости, которое при оценке выводит стоимость в десятичном формате GRT. Значения в позиции именованного аргумента запроса могут быть захвачены в predicate и использованы в expression. Глобальные переменные также могут быть установлены и заменены на placeholders в expression. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Пример модели затрат: +Example cost model: ``` # This statement captures the skip value, @@ -727,64 +727,64 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -Пример расчета стоимости запроса с использованием приведенной выше модели: +Example query costing using the above model: -| Запрос | Цена | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Применение модели затрат +#### Applying the cost model -Модели затрат применяются через интерфейс командной строки (CLI) индексатора, который передает их в API управления индексатором агента индексатора для сохранения в базе данных. Затем Indexer Service отбирает их и предоставляет модели затрат шлюзам каждый раз, когда они запрашивают их. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Взаимодействие с сетью +## Interacting with the network -### Стейкинг в протоколе +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -После того как индексатор застейкал GRT в протоколе, можно запустить [Indexer components](/indexing/overview/#indexer-components) и начать взаимодействие с сетью. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Подтверждение токенов +#### Approve tokens -1. Откройте [приложение Remix](https://remix.ethereum.org/) в браузере +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. В `File Explorer` создайте файл с именем **GraphToken.abi** с [token ABI](https://raw.githubusercontent.com/graphprotocol /contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. 
In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. В разделе environment выберите `Injected Web3`, а в разделе `Account` выберите адрес вашего индексатора. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Установите адрес контракта GraphToken. Вставьте адрес контракта GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) рядом с `At Address` и нажмите кнопку `At address`, чтобы применить его. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Вызовите функцию `approve(spender, amount)`, чтобы одобрить Staking контракт. Заполните `spender` адресом контракта стейкинга (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) и `amount` токенами для стейкинга (в wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Стейкинг токенов +#### Stake tokens -1. Откройте [приложение Remix](https://remix.ethereum.org/) в браузере +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. В `File Explorer` создайте файл с именем **Staking.abi** с ABI для стейкинга. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. В разделе environment выберите `Injected Web3`, а в разделе `Account` выберите адрес вашего индексатора. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Установите адрес Staking контракта. Вставьте адрес контракта (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) рядом с `At Address` и нажмите кнопку `At address`, чтобы применить его. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. Вызовите `stake()`, чтобы застейкать GRT в протоколе. +6. Call `stake()` to stake GRT in the protocol. -7. (Необязательно) Индексаторы могут утвердить другой адрес в качестве оператора своей инфраструктуры индексатора, чтобы отделить ключи, управляющие средствами, от тех, которые выполняют повседневные действия, такие как выделение аллокаций подграфов и обслуживание (платных) запросов. Чтобы установить оператора, вызовите `setOperator()` с адресом оператора. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. -8. (Необязательно) Чтобы контролировать распределение вознаграждений и стратегически привлекать делегаторов, индексаторы могут обновлять свои параметры делегирования, обновляя их indexingRewardCut (части на миллион), queryFeeCut (части на миллион) и cooldownBlocks (количество блоков). 
Чтобы сделать это, вызовите `setDelegationParameters()`. В следующем примере задается параметр queryFeeCut для распределения 95 % вознаграждений за запросы индексатору и 5 % делегаторам, параметр indexingRewardCut устанавливается для распределения 60 % вознаграждения за индексирование индексатору и 40 % делегатам, а также задается `thecooldownBlocks` период до 500 блоков.
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

@@ -806,12 +806,12 @@ To set the delegation parameters using Graph Explorer interface, follow these st

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### Срок существования аллокации
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-Индексаторам рекомендуется использовать функцию синхронизации вне сети для синхронизации развертывания подграфов в chainhead перед созданием аллокации в сети. Эта функция особенно полезна для подграфов, синхронизация которых может занять более 28 эпох или имеющих некоторую вероятность неопределенного сбоя.
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically.
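The parts-per-million values above are easy to sanity-check. Here is a minimal TypeScript sketch (illustrative only, not part of the indexer stack; the `split` helper is hypothetical) that reproduces the 95%/5% and 60%/40% splits implied by `setDelegationParameters(950000, 600000, 500)`:

```typescript
// Hypothetical helper: convert a parts-per-million (PPM) cut into an
// Indexer/Delegator split of a reward amount.
const PPM = 1_000_000;

function split(totalGrt: number, cutPpm: number): { indexer: number; delegators: number } {
  const indexer = (totalGrt * cutPpm) / PPM;
  return { indexer, delegators: totalGrt - indexer };
}

// queryFeeCut = 950000: 95% of query fee rebates go to the Indexer
console.log(split(1000, 950_000)); // { indexer: 950, delegators: 50 }

// indexingRewardCut = 600000: 60% of indexing rewards go to the Indexer
console.log(split(1000, 600_000)); // { indexer: 600, delegators: 400 }
```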
From 5ba069006e225aaa9d4b90d555dc0e06b68e0340 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:47 -0500 Subject: [PATCH 0082/1534] New translations overview.mdx (Swedish) --- website/src/pages/sv/indexing/overview.mdx | 458 ++++++++++----------- 1 file changed, 229 insertions(+), 229 deletions(-) diff --git a/website/src/pages/sv/indexing/overview.mdx b/website/src/pages/sv/indexing/overview.mdx index a3d5c9af5fc2..14cc86106efe 100644 --- a/website/src/pages/sv/indexing/overview.mdx +++ b/website/src/pages/sv/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexering +title: Indexing Overview +sidebarTitle: Översikt --- Indexerare är nodoperatörer i The Graph Network som satsar Graph Tokens (GRT) för att tillhandahålla indexering och frågebehandlingstjänster. Indexerare tjänar avgifter för frågor och indexering samt får frågebetalningar som återbetalas enligt en exponentiell återbetalningsfunktion. @@ -10,35 +11,35 @@ Indexerare väljer subgrafer att indexera baserat på subgrafens kuratersignal, ## FAQ -### Vad är det minsta satsade belopp som krävs för att vara en indexerare i nätverket? +### What is the minimum stake required to be an Indexer on the network? -Det minsta beloppet för en indexerare är för närvarande inställt på 100 000 GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### Vad är intäktskällorna för en indexerare? +### What are the revenue streams for an Indexer? -**Frågebetalningsåterbetalningar** - Betalningar för att servera frågor i nätverket. Dessa betalningar medieras via tillståndskanaler mellan en indexerare och en gateway. Varje frågebegäran från en gateway innehåller en betalning och det motsvarande svaret är en bevis på giltigheten av frågeresultatet. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Indexeringsbelöningar** - Genererade genom en årlig protokollsbredd på 3%, fördelas indexerare som indexerar subgrafdepåer för nätverket. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### Hur fördelas indexeringsbelöningar? +### How are indexing rewards distributed? -Indexeringsbelöningar kommer från protokollsinflation som är inställd på en årlig emission på 3%. De fördelas över subgrafer baserat på andelen av all kuratersignal på varje subgraf, och fördelas sedan proportionellt till indexerare baserat på deras tilldelade insats på den subgrafen. **En tilldelning måste avslutas med ett giltigt bevis på indexering (POI) som uppfyller de standarder som fastställts av skiljekommittéstadgan för att vara berättigad till belöningar.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. 
**An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### Vad är ett bevis på indexering (POI)? +### What is a proof of indexing (POI)? -POI:er används i nätverket för att verifiera att en indexerare indexerar de subgrafer de har tilldelat sig. Ett POI för det första blocket i den nuvarande epoken måste lämnas in när en tilldelning stängs för att vara berättigad till indexeringsbelöningar. Ett POI för ett block är en digest för alla entity store-transaktioner för en specifik subgrafdepå fram till och med det blocket. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### När fördelas indexeringsbelöningar? +### When are indexing rewards distributed? -Tilldelningar ackumulerar kontinuerligt belöningar medan de är aktiva och tilldelade inom 28 epoker. Belöningarna samlas in av indexerarna och distribueras när deras tilldelningar stängs. Det sker antingen manuellt, när indexeraren vill tvinga dem att stängas, eller efter 28 epoker kan en Delegat stänga tilldelningen för indexeraren, men detta resulterar inte i några belöningar. 28 epoker är den maximala tilldelningens livslängd (för närvarande varar en epok i cirka ~24h). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### Kan väntande indexeringsbelöningar övervakas? +### Can pending indexing rewards be monitored? The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -Många av gemenskapens egentillverkade instrument inkluderar värden för väntande belöningar och de kan enkelt kontrolleras manuellt genom att följa dessa steg: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. 
Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations:

@@ -56,129 +57,128 @@ query indexerAllocations {
 }
 ```

-Använd Etherscan för att anropa `getRewards()`:
+Use Etherscan to call `getRewards()`:

-- Navigera till [Etherscan-gränssnittet till belöningskontraktet](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)
-
-* För att anropa `getRewards()`:
+- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)
+- To call `getRewards()`:
  - Expand the **9. getRewards** dropdown.
  - Enter the **allocationID** in the input.
  - Click the **Query** button.

-### Vad är tvister och var kan jag se dem?
+### What are disputes and where can I view them?

-Indexerares frågor och tilldelningar kan båda bli föremål för tvister på The Graph under tvisteperioden. Tvisteperioden varierar beroende på typen av tvist. Frågor/erkännanden har en tvistefönster på 7 epocher, medan tilldelningar har 56 epocher. Efter att dessa perioder har passerat kan inga tvister öppnas mot vare sig tilldelningar eller frågor. När en tvist öppnas krävs en insättning av minst 10 000 GRT av Fishermen, som kommer att vara låsta tills tvisten är avslutad och en resolution har lämnats. Fishermen är nätverksdeltagare som öppnar tvister.
+Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants that open disputes.

-Tvister har **tre** möjliga utfall, liksom insättningen från Fishermen.
+Disputes have **three** possible outcomes, and so does the deposit of the Fishermen.

-- Om tvisten avvisas kommer den GRT som satts in av Fishermen att brännas, och den ifrågasatta Indexern kommer inte att bli straffad.
-- Om tvisten avgörs som oavgjord kommer Fishermens insättning att återbetalas, och den ifrågasatta Indexern kommer inte att bli straffad.
-- Om tvisten godkänns kommer den GRT som satts in av Fishermen att återbetalas, den ifrågasatta Indexern kommer att bli straffad, och Fishermen kommer att tjäna 50% av den straffade GRT.
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed.
+- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT.

-Tvister kan ses i gränssnittet på en Indexers profil under fliken `Tvister`.
+Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab.

-### Vad är återbetalningar av frågeavgifter och när distribueras de?
+### What are query fee rebates and when are they distributed?
-Förfrågningsavgifter samlas in av gateway och fördelas till indexerare enligt den exponentiella rabattfunktionen (se GIP [här](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). Den exponentiella rabattfunktionen föreslås som ett sätt att säkerställa att indexerare uppnår det bästa resultatet genom att troget servera förfrågningar. Den fungerar genom att ge indexerare incitament att allokerar en stor mängd insats (som kan beskäras om de begår fel när de serverar en förfrågan) i förhållande till den mängd förfrågningsavgifter de kan samla in. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -När en tilldelning har avslutats är återbetalningarna tillgängliga för Indexern att hämta. Vid hämtning distribueras frågeavgiftsåterbetalningarna till Indexern och deras Delegatorer baserat på frågeavgiftsminskningen och den exponentiella rabattfunktionen. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### Vad är frågeavgiftsminskning och minskning av indexeringsbelöning? +### What is query fee cut and indexing reward cut? -Värdena `queryFeeCut` och `indexingRewardCut` är delegationparametrar som Indexern kan sätta tillsammans med `cooldownBlocks` för att kontrollera distributionen av GRT mellan Indexern och deras Delegatorer. Se de sista stegen i [Staking i protokollet](/indexing/overview/#stake-in-the-protocol) för anvisningar om att ställa in delegationparametrarna. +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - andelen frågeavgiftsåterbetalningar som kommer att distribueras till Indexern. Om detta är inställt på 95% kommer Indexern att få 95% av de frågeavgifter som tjänas när en tilldelning avslutas, medan de andra 5% fördelas till Delegatorerna. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - andelen indexeringsbelöningar som kommer att distribueras till Indexern. Om detta är inställt på 95% kommer Indexern att få 95% av indexeringsbelöningarna när en tilldelning avslutas, och Delegatorerna kommer att dela de återstående 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### Hur vet Indexers vilka subgrafer de ska indexera? 
+### How do Indexers know which subgraphs to index?

-Indexers kan skilja sig åt genom att tillämpa avancerade tekniker för att fatta beslut om indexering av subgrafer, men för att ge en allmän idé kommer vi att diskutera flera viktiga metoder som används för att utvärdera subgrafer i nätverket:
+Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network:

-- **Kureringssignal** - Andelen nätverkskureringssignal som används för en specifik subgraf är en bra indikator på intresset för den subgrafen, särskilt under uppstartsfasen när frågevolymen ökar.
+- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query volume is ramping up.

-- **Inkassering av frågeavgifter** - Historisk data för volymen av frågeavgifter som samlats in för en specifik subgraf är en bra indikator på framtida efterfrågan.
+- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand.

-- **Insatsbelopp** - Att övervaka beteendet hos andra Indexers eller att titta på andelar av total insats som allokerats till specifika subgrafer kan låta en Indexer övervaka tillgångssidan för subgrafsförfrågningar och identifiera subgrafer som nätverket visar förtroende för eller subgrafer som kan behöva mer tillgång.
+- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply.

-- **Subgrafer utan indexeringsbelöningar** - Vissa subgrafer genererar inte indexeringsbelöningar huvudsakligen eftersom de använder otillåtna funktioner som IPFS eller eftersom de frågar ett annat nätverk utanför mainnet. Du kommer att se ett meddelande på en subgraf om den inte genererar indexeringsbelöningar.
+- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards.

-### Vilka är de tekniska kraven?
+### What are the hardware requirements?

-- **Liten** - Tillräckligt för att komma igång med att indexera flera subgrafer, kommer sannolikt att behöva utökas.
-- **Standard** - Standardinställning, detta är vad som används i exempelvis k8s/terraform-implementeringsmanifesten.
-- **Medium** - Produktionsindexer som stöder 100 subgrafer och 200-500 förfrågningar per sekund.
-- **Stor** - Förberedd för att indexera alla för närvarande använda subgrafer och att ta emot förfrågningar för relaterad trafik.
+- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded.
+- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests.
+- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second.
+- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic.

-| Konfiguration | Postgres<br />(CPU:er) | Postgres<br />(minne i GB) | Postgres<br />(disk i TB) | VM:er<br />(CPU:er) | VM:er<br />(minne i GB) |
-| --- | :-: | :-: | :-: | :-: | :-: |
-| Liten | 4 | 8 | 1 | 4 | 16 |
-| Standard | 8 | 30 | 1 | 12 | 48 |
-| Medium | 16 | 64 | 2 | 32 | 64 |
-| Stor | 72 | 468 | 3,5 | 48 | 184 |
+| Setup | Postgres<br />(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs<br />(memory in GBs) |
+| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: |
+| Small | 4 | 8 | 1 | 4 | 16 |
+| Standard | 8 | 30 | 1 | 12 | 48 |
+| Medium | 16 | 64 | 2 | 32 | 64 |
+| Large | 72 | 468 | 3.5 | 48 | 184 |

-### Vilka grundläggande säkerhetsåtgärder bör en Indexer vidta?
+### What are some basic security precautions an Indexer should take?

-- **Operatörplånbok** - Att skapa en operatörplånbok är en viktig försiktighetsåtgärd eftersom den möjliggör att en Indexer kan upprätthålla separation mellan sina nycklar som styr insatsen och de som är ansvariga för dagliga operationer. Se [Insats i protokollet](/indexing/overview/#stake-in-the-protocol) för anvisningar.
+- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions.

-- **Brandvägg** - Endast Indexertjänsten behöver vara offentligt exponerad och särskild uppmärksamhet bör ägnas åt att säkra administrativa portar och databasåtkomst: Graph Node JSON-RPC-gränssnittet (standardport: 8030), Indexerhanterings-API-gränssnittet (standardport: 18000) och PostgreSQL-databasgränssnittet (standardport: 5432) bör inte vara exponerade.
+- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed.

-## Infrastruktur
+## Infrastructure

-I centrum av en Indexers infrastruktur finns Graph Node, som övervakar de indexerade nätverken, extraherar och laddar data enligt en subgrafdefinition och serverar det som en [GraphQL API](/about/#how-the-graph-works). Graph Node måste vara ansluten till en endpoint som exponerar data från varje indexerat nätverk; en IPFS-nod för att hämta data; en PostgreSQL-databas för lagring; och Indexer-komponenter som underlättar dess interaktioner med nätverket.
+At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.

-- **PostgreSQL-databas** - Huvudlagret för Graph Node, detta är där subgrafdata lagras. Indexertjänsten och agenten använder också databasen för att lagra state channel-data, kostnadsmodeller, indexeringsregler och tilldelningsåtgärder.
+- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.

-- **Dataendpoint** - För EVM-kompatibla nätverk måste Graph Node vara ansluten till en endpoint som exponerar en EVM-kompatibel JSON-RPC-API. Detta kan ta form av en enskild klient eller det kan vara en mer komplex konfiguration som balanserar belastningen över flera. Det är viktigt att vara medveten om att vissa subgrafer kan kräva specifika klientfunktioner som arkivläge och/eller parity-spårnings-API.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **IPFS-nod (version mindre än 5)** - Metadata för subgrafdistribution lagras på IPFS-nätverket. Graph Node har huvudsakligen åtkomst till IPFS-noden under subgrafdistributionen för att hämta subgrafmanifestet och alla länkade filer. Nätverksindexer behöver inte hosta sin egen IPFS-nod, en IPFS-nod för nätverket är värd på https://ipfs.network.thegraph.com.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

-- **Indexertjänst** - Hanterar alla nödvändiga externa kommunikationer med nätverket. Delar kostnadsmodeller och indexeringsstatus, skickar frågebegäranden från gateways vidare till en Graph Node och hanterar frågebetalningar via tillståndskanaler med gatewayen.
+- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **Indexeragent** - Underlättar Indexers interaktioner på kedjan, inklusive registrering i nätverket, hantering av subgrafdistributioner till sina Graph Node/noder och hantering av tilldelningar.
+- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.

-- **Prometheus-metrisk server** - Graph Node och Indexer-komponenter loggar sina metriska data till metrisk servern.
+- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

-Observera: För att stödja smidig skalning rekommenderas det att fråge- och indexeringsbekymmer separeras mellan olika uppsättningar noder: frågenoder och indexnoder.
+Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.

-### Översikt över portar
+### Ports overview

-> **Viktigt**: Var försiktig med att offentligt exponera portar - **administrativa portar** bör vara säkra. Detta inkluderar JSON-RPC för Graph Node och Indexer-hanteringsendpunkterna som beskrivs nedan.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.

#### Graf Node

-| Port | Syfte | Vägar | CLI-argument | Miljövariabel |
-| --- | --- | --- | --- | --- |
-| 8000 | GraphQL HTTP-server<br />(för subgraf-förfrågningar) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
-| 8001 | GraphQL WS<br />(för subgraf-prenumerationer) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
-| 8020 | JSON-RPC<br />(för hantering av distributioner) | / | --admin-port | - |
-| 8030 | Subgrafindexeringsstatus-API | /graphql | --index-node-port | - |
-| 8040 | Prometheus-metrar | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC<br />(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

-#### Indexertjänst
+#### Indexer Service

-| Port | Syfte | Vägar | CLI-argument | Miljövariabel |
-| --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP-server<br />(för betalda subgraf-förfrågningar) | /subgraphs/id/...<br />/status<br />/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus-metrar | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- |
+| 7600 | GraphQL HTTP server<br />(for paid subgraph queries) | /subgraphs/id/...<br />/status<br />/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` |
+| 7300 | Prometheus metrics | /metrics | \--metrics-port | - |

-#### Indexeragent
+#### Indexer Agent

-| Port | Syfte | Vägar | CLI-argument | Miljövariabel |
-| ---- | --------------------- | ----- | ------------------------- | --------------------------------------- |
-| 8000 | Indexerhanterings-API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- |
+| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` |

-### Konfigurera serverinfrastruktur med Terraform på Google Cloud
+### Setup server infrastructure using Terraform on Google Cloud

-> Obs: Indexers kan alternativt använda AWS, Microsoft Azure eller Alibaba.
+> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba.

-#### Installera förutsättningar
+#### Install prerequisites

-- SDK för Google Cloud
-- Kubectl kommandoradsverktyg
+- Google Cloud SDK
+- Kubectl command line tool
- Terraform

-#### Skapa ett Google Cloud-projekt
+#### Create a Google Cloud Project

- Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer).

@@ -188,7 +188,7 @@
cd terraform
```

-- Autentisera dig med Google Cloud och skapa ett nytt projekt.
+- Authenticate with Google Cloud and create a new project.

```sh
gcloud auth login
project=
gcloud projects create --enable-cloud-apis $project
```

-- Använd faktureringssidan i Google Cloud Console för att aktivera fakturering för det nya projektet.
+- Use the Google Cloud Console's billing page to enable billing for the new project.

-- Skapa en Google Cloud-konfiguration.
+- Create a Google Cloud configuration.

```sh
proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project")
gcloud config configurations create $project
gcloud config set project "$proj_id"
gcloud config set compute/region us-central1
gcloud config set compute/zone us-central1-a
```

-- Aktivera nödvändiga API:er för Google Cloud.
+- Enable required Google Cloud APIs.

```sh
gcloud services enable compute.googleapis.com
gcloud services enable container.googleapis.com
gcloud services enable servicenetworking.googleapis.com
gcloud services enable sqladmin.googleapis.com
```

-- Skapa ett servicekonto.
+- Create a service account.

```sh
svc_name=
gcloud iam service-accounts create $svc_name \
  --description="Service account for Terraform" \
  --display-name="$svc_name"
gcloud iam service-accounts list
# Get the email of the service account from the list
svc=$(gcloud iam service-accounts list --format='get(email)'
--filter="displayName=$svc_name")
gcloud iam service-accounts keys create .gcloud-credentials.json \
  --iam-account="$svc"
gcloud projects add-iam-policy-binding $proj_id \
  --member serviceAccount:$svc \
  --role roles/editor
```

-- Aktivera peering mellan databasen och Kubernetes-klustret som kommer att skapas i nästa steg.
+- Enable peering between database and Kubernetes cluster that will be created in the next step.

```sh
gcloud compute addresses create google-managed-services-default \
  --prefix-length=20 \
  --purpose=VPC_PEERING \
  --network default \
  --global \
  --description 'IP Range for peer networks.'
gcloud services vpc-peerings connect \
  --network=default \
  --ranges=google-managed-services-default
```

-- Skapa en minimal konfigurationsfil för terraformen (uppdatera vid behov).
+- Create a minimal Terraform configuration file (update as needed).

```sh
indexer=
cat > terraform.tfvars <

-> **OBS**: Alla körtidskonfigurationsvariabler kan antingen tillämpas som parametrar till kommandot vid start eller med miljövariabler i formatet `COMPONENT_NAME_VARIABLE_NAME` (ex. `INDEXER_AGENT_ETHEREUM`).
+> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME` (ex. `INDEXER_AGENT_ETHEREUM`).

-#### Indexeragent
+#### Indexer agent

```sh
graph-indexer-agent start \
@@ -488,7 +488,7 @@ graph-indexer-agent start \
 | pino-pretty
```

-#### Indexeringstjänst
+#### Indexer service

```sh
SERVER_HOST=localhost \
@@ -516,56 +516,56 @@ graph-indexer-service start \

#### Indexer CLI

-Indexer CLI är ett insticksprogram för [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) tillgängligt i terminalen på `graph indexer`.
+The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`.

```sh
graph indexer connect http://localhost:18000
graph indexer status
```

-#### Indexerhantering med Indexer CLI
+#### Indexer management using Indexer CLI

-Det föreslagna verktyget för att interagera med **Indexer Management API** är **Indexer CLI**, ett tillägg till **Graph CLI**. Indexeragenten behöver input från en Indexer för att autonomt interagera med nätverket på Indexers vägnar. Mekanismen för att definiera Indexeragentens beteende är **allokeringhantering** och **indexeringsregler**. I automatiskt läge kan en Indexer använda **indexeringsregler** för att tillämpa sin specifika strategi för att välja subgrafer att indexera och utföra frågor för. Regler hanteras via ett GraphQL API som serveras av agenten och kallas Indexer Management API. I manuellt läge kan en Indexer skapa allokationsåtgärder med **åtgärds kö** och godkänna dem explicit innan de utförs. I övervakningsläge används **indexeringsregler** för att fylla **åtgärds kö** och kräver också explicit godkännande för utförande.
+The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on behalf of the Indexer. The mechanisms for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution.

-#### Användning
+#### Usage

-**Indexer CLI** ansluter till Indexeragenten, vanligtvis via port-vidarebefordran, så CLI behöver inte köras på samma server eller kluster. För att hjälpa dig komma igång och ge lite kontext kommer CLI att beskrivas här kortfattat.
+The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here.

-- `graph indexer connect ` - Anslut till Indexerhanterings-API:et. Vanligtvis öppnas anslutningen till servern via port-vidarebefordran, så CLI kan enkelt användas fjärrstyras. (Exempel: `kubectl port-forward pod/ 8000:8000`)
+- `graph indexer connect ` - Connect to the Indexer management API.
Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Hämta en eller flera indexeringsregler med `all` som `` för att hämta alla regler, eller `global` för att hämta de globala standardvärdena. Ett ytterligare argument `--merged` kan användas för att ange att regler specifika för distributionen slås samman med den globala regeln. Detta är hur de tillämpas i Indexeragenten. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - Ange en eller flera indexeringsregler. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Starta indexering av en subgraph-distribution om den är tillgänglig och ange dess `decisionBasis` till `always`, så kommer Indexeragenten alltid att välja att indexera den. Om den globala regeln är inställd på always kommer alla tillgängliga subgrafer på nätverket att indexeras. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` - Stoppa indexeringen av en distribution och ange dess `decisionBasis` till never, så kommer den att hoppa över den här distributionen när den beslutar om distributioner att indexera. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` — Ange `decisionBasis` för en distribution till `rules`, så kommer Indexeragenten att använda indexeringsregler för att avgöra om den ska indexera den här distributionen. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. 
-- `graph indexer action queue allocate ` - Queue allocation action

-- `graph indexer action queue reallocate ` - Queue reallocate action

-- `graph indexer action queue unallocate ` - Queue unallocate action

-- `graph indexer actions cancel [ ...]` - Cancel all actions in the queue if id is unspecified, otherwise cancel the given array of ids, separated by spaces

-- `graph indexer actions approve [ ...]` - Approve multiple actions for execution

-- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately

All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument.

#### Indexing rules

Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing.

For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`.
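To make those threshold semantics concrete, here is a minimal TypeScript sketch (hypothetical helper and field names, not the actual indexer-agent implementation) that treats a deployment as eligible as soon as any non-null threshold on the applicable rule is met; the rule fields mirror the data model shown below:

```typescript
// Illustrative only: evaluate threshold-based indexing rules against
// per-deployment network stats. Any satisfied non-null threshold qualifies.
interface RuleThresholds {
  minStake?: number | null;            // GRT allocated to the deployment
  minSignal?: number | null;           // minimum curation signal
  maxSignal?: number | null;           // maximum curation signal
  minAverageQueryFees?: number | null; // minimum average query fees
}

interface DeploymentStats {
  stakedTokens: number;
  signalAmount: number;
  avgQueryFees: number;
}

function shouldIndex(rule: RuleThresholds, stats: DeploymentStats): boolean {
  if (rule.minStake != null && stats.stakedTokens > rule.minStake) return true;
  if (rule.minSignal != null && stats.signalAmount >= rule.minSignal) return true;
  if (rule.maxSignal != null && stats.signalAmount <= rule.maxSignal) return true;
  if (rule.minAverageQueryFees != null && stats.avgQueryFees >= rule.minAverageQueryFees) return true;
  return false;
}

// With the example global rule of minStake = 5 (GRT):
console.log(shouldIndex({ minStake: 5 }, { stakedTokens: 12, signalAmount: 0, avgQueryFees: 0 })); // true
```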
-Datamodell: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -Exempel på användning av indexeringsregel: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### CLI tail-åtgärder +#### Actions queue CLI -Indexer-cli tillhandahåller ett `actions`-modul för manuellt arbete med åtgärds kön. Det använder **Graphql API** som hostas av indexeringshanteringsservern för att interagera med åtgärds kön. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -Åtgärdsutförande-arbetaren kommer endast att hämta objekt från kön för att utföra om de har `ActionStatus = approved`. På den rekommenderade vägen läggs åtgärder till i kön med ActionStatus = queued, så de måste sedan godkännas för att utföras på kedjan. Den generella flödet kommer att se ut som följer: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Åtgärd läggs till i kön av tredjeparts-optimeringsverktyget eller indexer-cli-användaren -- Indexer kan använda `indexer-cli` för att visa alla köade åtgärder -- Indexer (eller annan programvara) kan godkänna eller avbryta åtgärder i kön med hjälp av `indexer-cli`. Godkänn och avbryt kommandon tar en matris av åtgärds-id som inmatning. -- Utförande-arbetaren kollar regelbundet kön för godkända åtgärder. Den hämtar de `approved` åtgärderna från kön, försöker utföra dem och uppdaterar värdena i databasen beroende på utförandestatus till `success` eller `failed`. -- Om en åtgärd är framgångsrik kommer arbetaren att se till att det finns en indexeringsregel som berättar för agenten hur allokeringen ska hanteras framöver, användbart när man tar manuella åtgärder medan agenten är i `auto` eller `oversight` -läge. -- Indexer kan övervaka åtgärds kön för att se en historia över åtgärdsutförande och om det behövs godkänna om och uppdatera åtgärdsobjekt om de misslyckades med utförande. Åtgärds kön ger en historia över alla köade och tagna åtgärder. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. 
The action queue provides a history of all actions queued and taken. -Datamodell: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Exempel på användning från källa: +Example usage from source: ```bash graph indexer actions get all @@ -677,114 +677,114 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Observera att åtgärdstyper som stöds för allokeringshantering har olika krav på indata: +Note that supported action types for allocation management have different input requirements: -- `Tilldela` - allokera insats till en specifik subgraph-deploering +- `Allocate` - allocate stake to a specific subgraph deployment - - obligatoriska åtgärdsparametrar: + - required action params: - deploymentID - - belopp + - amount -- `Avslå` - stäng allokeringen och frigör insatsen för omallokering någon annanstans +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - obligatoriska åtgärdsparametrar: + - required action params: - allocationID - deploymentID - - valfria åtgärdsparametrar: + - optional action params: - poi - - force (tvingar användning av den angivna POI även om den inte matchar det som grafnoden tillhandahåller) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Omallokera` - atomiskt stäng allokeringen och öppna en ny allokering för samma subgraph-deploering +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - obligatoriska åtgärdsparametrar: + - required action params: - allocationID - deploymentID - - belopp - - valfria åtgärdsparametrar: + - amount + - optional action params: - poi - - force (tvingar användning av den angivna POI även om den inte matchar det som grafnoden tillhandahåller) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Kostnadsmodeller +#### Cost models -Kostnadsmodeller tillhandahåller dynamisk prissättning för frågor baserat på marknaden och frågans egenskaper. Indexer Service delar en kostnadsmodell med gatewayerna för varje subgraph för vilka de avser att svara på frågor. Gatewayerna använder i sin tur kostnadsmodellen för att fatta beslut om indexeringsval per fråga och för att förhandla om betalning med valda indexers. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Agora-språket ger ett flexibelt format för deklaration av kostnadsmodeller för frågor. En Agora-prismodell är en sekvens av uttalanden som utförs i ordning för varje toppnivåfråga i en GraphQL-fråga. För varje toppnivåfråga avgör det första uttalandet som matchar den priset för den frågan. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -Ett uttalande består av en predikat, som används för att matcha GraphQL-frågor, och ett kostnadsuttryck som när det utvärderas ger en kostnad i decimal GRT. Värden i den namngivna argumentpositionen i en fråga kan fångas i predikatet och användas i uttrycket. 
Globala variabler kan också sättas och ersättas för platshållare i ett uttryck. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Exempel kostnadsmodell: +Example cost model: ``` -# Detta uttalande fångar skip-värdet, -# använder ett booleskt uttryck i predikatet för att matcha specifika frågor som använder `skip` -# och ett kostnadsuttryck för att beräkna kostnaden baserat på `skip`-värdet och den globala SYSTEM_LOAD +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; -# Denna standard matchar alla GraphQL-uttryck. -# Den använder en Global som ersatts i uttrycket för att beräkna kostnaden +# This default will match any GraphQL expression. +# It uses a Global substituted into the expression to calculate cost default => 0.1 * $SYSTEM_LOAD; ``` -Exempel på kostnadskalkyl enligt ovanstående modell: +Example query costing using the above model: -| Fråga | Pris | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Tillämpning av kostnadsmodellen +#### Applying the cost model -Kostnadsmodeller tillämpas via Indexer CLI, som skickar dem till Indexer Management API för Indexer agent för lagring i databasen. Indexer Service kommer sedan att hämta dem och servera kostnadsmodellerna till gatewayerna när de begär dem. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interagera med nätverket +## Interacting with the network -### Satsa i protokollet +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -När en Indexer har satsat GRT i protokollet kan [Indexer-komponenterna](/indexing/overview/#indexer-components) startas och börja interagera med nätverket. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Godkänn tokens +#### Approve tokens -1. Öppna [Remix-appen](https://remix.ethereum.org/) i en webbläsare +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. 
I `Filutforskaren` skapa en fil med namnet **GraphToken.abi** med [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. Under miljö väljer du `Injected Web3` och under `Konto` väljer du din Indexer-adress. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Ange kontraktadressen för GraphToken - Klistra in kontraktadressen för GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) bredvid `Vid adress` och klicka på knappen `Vid adress` för att tillämpa. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Anropa funktionen `approve(spender, amount)` för att godkänna Staking-kontraktet. Fyll i `spender` med Staking-kontraktadressen (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) och `amount` med de tokens som ska satsas (i wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Satsa tokens +#### Stake tokens -1. Öppna [Remix-appen](https://remix.ethereum.org/) i en webbläsare +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. I `Filutforskaren` skapa en fil med namnet **Staking.abi** med stakings ABI. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. Under miljö väljer du `Injected Web3` och under `Konto` väljer du din Indexer-adress. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Ange kontraktadressen för Staking - Klistra in kontraktadressen för Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) bredvid `Vid adress` och klicka på knappen `Vid adress` för att tillämpa. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. Anropa `stake()` för att satsa GRT i protokollet. +6. Call `stake()` to stake GRT in the protocol. -7. (Valfritt) Indexers kan godkänna en annan adress att vara operatör för sin Indexer-infrastruktur för att separera de nycklar som kontrollerar medlen från de som utför dagliga åtgärder som att tilldela på subgrafer och servera (betalda) frågor. För att ställa in operatören anropas `setOperator()` med operatörsadressen. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. -8. 
(Valfritt) För att kontrollera fördelningen av belöningar och strategiskt attrahera Delegators kan Indexers uppdatera sina delegationsparametrar genom att uppdatera sina `indexingRewardCut` (delar per miljon), `queryFeeCut` (delar per miljon) och `cooldownBlocks` (antal block). För att göra detta, anropa `setDelegationParameters()`. Följande exempel anger `queryFeeCut` för att fördela 95% av frågebidragen till Indexer och 5% till Delegators, ställ `indexingRewardCut` för att fördela 60% av indexbelöningarna till Indexer och 40% till Delegators, och ställ in perioden för `thecooldownBlocks` till 500 block.
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

@@ -806,12 +806,12 @@ To set the delegation parameters using Graph Explorer interface, follow these st

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### En allokations livscykel
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-Det rekommenderas att Indexers använder funktionen för offchain-synkronisering för att synkronisera subgraph-deploys till kedjehuvudet innan de skapar allokeringen på kedjan. Den här funktionen är särskilt användbar för subgraphs som kan ta längre tid än 28 epoker att synkronisera eller har vissa chanser att misslyckas obestämt.
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain.
This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 314bfc5d2e3aa10ab8eab2df3bda23b727ad89d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:48 -0500 Subject: [PATCH 0083/1534] New translations overview.mdx (Turkish) --- website/src/pages/tr/indexing/overview.mdx | 368 ++++++++++----------- 1 file changed, 184 insertions(+), 184 deletions(-) diff --git a/website/src/pages/tr/indexing/overview.mdx b/website/src/pages/tr/indexing/overview.mdx index c2d6ff32381d..1ebe85a6ab2d 100644 --- a/website/src/pages/tr/indexing/overview.mdx +++ b/website/src/pages/tr/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexing +title: Indexing Overview +sidebarTitle: Genel Bakış --- İndeksleyiciler, indeksleme ve sorgu işleme hizmetleri sağlamak için Graph Token'leri (GRT) stake eden Graph Ağındaki düğüm operatörleridir. İndeksleyiciler, hizmetleri karşılığında sorgu ücretleri ve indeksleme ödülleri kazanırlar. Ayrıca üstel bir indirim fonksiyonuna göre geri ödenen sorgu ücretleri de kazanırlar. @@ -8,7 +9,7 @@ GRT that is staked in the protocol is subject to a thawing period and can be sla Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. -## SSS +## FAQ ### What is the minimum stake required to be an Indexer on the network? @@ -59,8 +60,7 @@ query indexerAllocations { Use Etherscan to call `getRewards()`: - Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* To call `getRewards()`: +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - Enter the **allocationID** in the input. - Click the **Query** button. @@ -79,17 +79,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Sorgu ücretleri ağ geçidi tarafından toplanır ve üstel indirim fonksiyonuna göre indeksleyicilere dağıtılır ([buradan](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162) GIP'e bakınız). Üstel indirim fonksiyonu, indeksleyicilerin sorguları dürüstçe sunarak en iyi sonucu elde etmelerini sağlamanın bir yolu olarak önerilmiştir. İndeksleyicileri, toplayabilecekleri sorgu ücretlerine göre büyük miktarda pay (bir sorguya hizmet verirken hata yaptıklarında kesinti olabilir) ayırmaya teşvik ederek çalışmaktadır. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Bir tahsisat kapatıldıktan sonra iadeler İndeksleyici tarafından talep edilebilir. Talep edildikten sonra, sorgu ücreti iadeleri, sorgu ücreti kesintisi ve üstel indirim fonksiyonuna göre İndeksleyiciye ve Delegatörlerine dağıtılır. 
+Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - İndeksleyiciye dağıtılacak sorgu ücreti iadelerinin %'si. Bu %95 olarak ayarlanırsa, İndeksleyici bir tahsisat kapatıldığında kazanılan sorgu ücretlerinin %95'ini alır ve diğer %5'lik kısım Delegatörlere gider. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - İndeksleyiciye dağıtılacak indeksleme ödüllerinin %'si. Bu, %95 olarak ayarlanırsa, indeksleyici, bir tahsis kapatıldığında indeksleme ödül havuzunun %95'ini alacak ve delegatörler diğer %5'i bölüşecektir. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? @@ -99,86 +99,86 @@ Indexers may differentiate themselves by applying advanced techniques for making - **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **Stake edilen miktar** - Diğer İndeksleyicilerin davranışlarını izlemek veya belirli subgraphlara tahsis edilen toplam stake oranlarına bakmak, bir İndeksleyicinin subgraph sorgularına yönelik arz tarafını izlemesine olanak tanır; böylece ağın güvendiği subgraphları veya daha fazla arz ihtiyacı olabilecek subgraphları belirlemesine yardımcı olur. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **İndeksleme ödülü olmayan subgraphlar** - Bazı subgraphlar, IPFS gibi desteklenmeyen özellikleri kullandıkları veya ana ağ dışında başka bir ağı sorguladıkları için indeksleme ödülü üretmezler. İndeksleme ödülleri üretmeyen bir subgraph üzerinde bir mesaj göreceksiniz. +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -### Donanım gereksinimleri nelerdir? +### What are the hardware requirements? -- **Düşük** - Birkaç subgraph'ı indekslemeye başlamak için yeterli, muhtemelen genişletilmesi gerekecek. -- **Standart** - Varsayılan kurulum, örnek k8s/terraform dağıtım manifestlerinde kullanılan budur. -- **Orta** - 100 subgraph ve saniyede 200-500 isteği destekleyen Üretim İndeksleyici. 
-- **Yüksek** - Şu anda kullanılan tüm subgraphları indekslemek ve ilgili trafik için istekleri sunmak için hazırlanmıştır.
+- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded.
+- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests.
+- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second.
+- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic.

-| Kurulum | Postgres<br />(CPU'lar) | Postgres<br />(GB cinsinden bellek) | Postgres<br />(TB cinsinden disk) | VM'ler<br />(CPU'lar) | VM'ler<br />(GB cinsinden bellek) |
-| --- | :-: | :-: | :-: | :-: | :-: |
-| Düşük | 4 | 8 | 1 | 4 | 16 |
-| Standart | 8 | 30 | 1 | 12 | 48 |
-| Orta | 16 | 64 | 2 | 32 | 64 |
-| Yüksek | 72 | 468 | 3.5 | 48 | 184 |
+| Setup | Postgres<br />(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs<br />(memory in GBs) |
+| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: |
+| Small | 4 | 8 | 1 | 4 | 16 |
+| Standard | 8 | 30 | 1 | 12 | 48 |
+| Medium | 16 | 64 | 2 | 32 | 64 |
+| Large | 72 | 468 | 3.5 | 48 | 184 |

-### Bir İndeksleyicinin alması gereken bazı temel güvenlik önlemleri nelerdir?
+### What are some basic security precautions an Indexer should take?

-- **Operatör cüzdanı** - Bir operatör cüzdanı oluşturmak önemli bir önlemdir, çünkü bir İndeksleyicinin stake'i kontrol eden anahtarları ile günlük işlemleri kontrol eden anahtarları arasında ayrım yapmasına olanak tanır. Talimatlar için [Protokolde Stake](/indexing/overview/#stake-in-the-protocol) bölümüne bakın.
+- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions.

-- **Firewall** - Yalnızca İndeksleyici hizmetinin herkese açık olması gerekir ve yönetici bağlantı noktalarının ve veritabanı erişiminin kilitlenmesine özellikle dikkat edilmelidir: Graph Node JSON-RPC uç noktası (varsayılan bağlantı noktası: 8030), İndeksleyici yönetim API uç noktası (varsayılan bağlantı noktası: 18000) ve Postgres veritabanı uç noktası (varsayılan bağlantı noktası: 5432) herkese açık olmamalıdır.
+- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed.

-## Altyapı
+## Infrastructure

-Bir İndeksleyicinin altyapısının merkezinde, indekslenen ağları izleyen, bir subgraph tanımına göre verileri ayıklayan, yükleyen ve [GraphQL API](/about/#how-the-graph-works) olarak sunan Graph Düğümü yer alır. Graph Düğümü'nün, her bir indekslenmiş ağdan gelen verileri açığa çıkaran bir uç noktaya; veri kaynağı için bir IPFS düğümüne; deposu için bir PostgreSQL veritabanına ve ağ ile etkileşimlerini kolaylaştıran İndeksleyici bileşenlerine bağlanması gerekir.
+At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.

-- **PostgreSQL veritabanı** - Graph Düğümü için ana depo, subgraph verilerinin depolandığı yerdir. İndeksleyici hizmeti ve aracı da durum kanalı verilerini, maliyet modellerini, indeksleme kurallarını ve tahsis eylemlerini depolamak için veritabanını kullanır.
+- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.

-- **Veri uç noktası** - EVM uyumlu ağlar için, Graph Düğümü'nün EVM uyumlu bir JSON-RPC API'si sunan bir uç noktaya bağlanması gerekir.
Bu, tek bir istemci şeklinde olabileceği gibi birden fazla istemci arasında yük dengelemesi yapan daha karmaşık bir kurulum da olabilir. Belirli subgraphlar'ın arşiv modu ve/veya parite izleme API'si gibi belirli istemci yetenekleri gerektireceğinin bilincinde olmak önemlidir. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS düğümü (sürüm 5'ten düşük)** - Subgraph dağıtım üst verisi IPFS ağında saklanır. Graph Düğümü, subgraph manifesti ve tüm bağlantılı dosyaları almak için subgraph dağıtımı sırasında öncelikle IPFS düğümüne erişir. Ağ İndeksleyicilerinin kendi IPFS düğümlerini barındırmalarına gerek yoktur, ağ için bir IPFS düğümü https://ipfs.network.thegraph.com adresinde barındırılır. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. -- **İndeksleyici hizmeti** - Ağ ile gerekli tüm harici iletişimleri gerçekleştirir. Maliyet modellerini ve indeksleme durumlarını paylaşır, ağ geçitlerinden gelen sorgu isteklerini bir Graph Düğümü'ne iletir ve ağ geçidi ile durum kanalları aracılığıyla sorgu ödemelerini yönetir. +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **İndeksleyici aracı** - Ağa kaydolma, Graph Düğümlerine subgraph dağıtımlarını ve tahsisleri yönetme dahil olmak üzere İndeksleyicilerin zincir üzerindeki etkileşimlerini kolaylaştırır. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **Prometheus metrik sunucusu** - Graph Düğümü ve İndeksleyici bileşenleri metriklerini metrik sunucusuna kaydeder. +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -Not: Çevik ölçeklendirmeyi desteklemek için, sorgulama ve indeksleme endişelerinin sorgu düğümleri ve indeks düğümleri olarak farklı düğüm kümeleri arasında ayrılması önerilir. +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -### Portlara genel bakış +### Ports overview -> **Önemli**: Portları herkese açık hale getirme konusunda dikkatli olun - **yönetim portları** kilitli tutulmalıdır. Bu, aşağıda ayrıntıları verilen Graph Düğümü JSON-RPC ve İndeksleyici yönetim uç noktalarını içerir. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Graph Node -| Port | Amaç | Rotalar | CLI Argümanı | Ortam Değişkeni | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP sunucusu
( subgraph sorguları için) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
( subgraph abonelikleri için) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(dağıtımları yönetmek için) | / | --admin-port | - | -| 8030 | Subgraph indeksleme durum API'si | /graphql | --index-node-port | - | -| 8040 | Prometheus metrikleri | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### İndeksleyici Hizmeti +#### Indexer Service -| Port | Amaç | Rotalar | CLI Argümanı | Ortam Değişkeni | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP sunucusu
(ücretli subgraph sorguları için) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrikleri | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### İndeksleyici Aracı +#### Indexer Agent -| Port | Amaç | Rotalar | CLI Argümanı | Ortam Değişkeni | -| ---- | --------------------------- | ------- | ------------------------- | --------------------------------------- | -| 8000 | İndeksleyici yönetim API'si | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Google Cloud'da Terraform kullanarak sunucu altyapısını kurun +### Setup server infrastructure using Terraform on Google Cloud -> Not: İndeksleyiciler alternatif olarak AWS, Microsoft Azure veya Alibaba kullanabilir. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Önkoşulları yükleme +#### Install prerequisites - Google Cloud SDK -- Kubectl komut satırı aracı +- Kubectl command line tool - Terraform -#### Bir Google Cloud Projesi Oluşturun +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Not: Çevik ölçeklendirmeyi desteklemek için, sorgulama ve indeksleme endişe cd terraform ``` -- Google Cloud ile kimlik doğrulaması yapın ve yeni bir proje oluşturun. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Yeni projenin faturalandırılmasını etkinleştirmek için Google Cloud Console'un faturalandırma sayfasını kullanın. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Bir Google Cloud yapılandırması oluşturun. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Gerekli Google Cloud API'lerini etkinleştirin. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Bir hizmet hesabı oluşturun. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Bir sonraki adımda oluşturulacak veritabanı ve Kubernetes kümesi arasında eşlemeyi etkinleştirin. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -256,15 +256,15 @@ indexer= cat > terraform.tfvars < **NOT**: Tüm çalışma zamanı yapılandırma değişkenleri ya başlangıçta komuta parametre olarak ya da `COMPONENT_NAME_VARIABLE_NAME` (örn. `INDEXER_AGENT_ETHEREUM`) biçimindeki ortam değişkenleri kullanılarak uygulanabilir. +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). 
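For example, the two invocations below are equivalent ways to supply the same setting (a minimal sketch: the endpoint URL is illustrative, and the `--ethereum` flag name is an assumption inferred from the `INDEXER_AGENT_ETHEREUM` variable named above):

```sh
# As a startup parameter (flag name assumed):
graph-indexer-agent start --ethereum http://localhost:8545

# As an environment variable in COMPONENT_NAME_VARIABLE_NAME form:
INDEXER_AGENT_ETHEREUM=http://localhost:8545 graph-indexer-agent start
```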
-#### İndeksleyici Aracı +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### İndeksleyici hizmeti +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -514,58 +514,58 @@ graph-indexer-service start \ | pino-pretty ``` -#### İndeksleyici CLI +#### Indexer CLI -İndeksleyici CLI, `graph indexer` terminalinden erişilebilen [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) için bir eklentidir. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### İndeksleyici CLI kullanarak indeksleyici yönetimi +#### Indexer management using Indexer CLI -**İndeksleyici Yönetim API**'si ile etkileşim için önerilen araç, **Graph CLI**'nın bir uzantısı olan **İndeksleyici CLI**'dır. İndeksleyici aracısı, İndeksleyici adına ağ ile bağımsız olarak etkileşim kurmak için bir İndeksleyiciden gelen girdiye ihtiyaç duyar. İndeksleyici aracı davranışını tanımlama mekanizması **tahsis yönetim** modu ve **indeksleme kurallarıdır**. Otomatik modda, bir İndeksleyici, sorguları indekslemek ve sunmak üzere subgraph'ları seçmek için kendi özel stratejisini uygulamak üzere **indeksleme kurallarını** kullanabilir. Kurallar, aracı tarafından sunulan ve İndeksleyici Yönetim API'si olarak bilinen bir GraphQL API aracılığıyla yönetilir. Manuel modda, bir İndeksleyici **eylem kuyruğunu** kullanarak tahsis eylemleri oluşturabilir ve yürütülmeden önce bunları açıkça onaylayabilir. Gözetim modunda, **indeksleme kuralları** **eylem kuyruğunu** doldurmak için kullanılır ve ayrıca yürütme için açık onay gerektirir. +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Kullanış +#### Usage -**İndeksleyici CLI**, tipik olarak bağlantı noktası yönlendirme yoluyla İndeksleyici aracısına bağlanır, bu nedenle CLI'nın aynı sunucuda veya kümede çalışması gerekmez. Başlamanıza yardımcı olmak ve biraz bilgi vermek için CLI burada kısaca açıklanacaktır. +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `graph indexer connect ` - İndeksleyici yönetim API'sine bağlanın. Tipik olarak sunucuya bağlantı port yönlendirme yoluyla açılır, böylece CLI uzaktan kolayca çalıştırılabilir. (Örnek: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. 
Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Tüm kuralları almak için `` olarak `all` veya genel varsayılanları almak için `global` kullanarak bir veya daha fazla indeksleme kuralı alın. Dağıtıma özgü kuralların genel kuralla birleştirileceğini belirtmek için bir `--merged` ek bağımsız değişkeni kullanılabilir. Bu şekilde, indeksleyici aracısında uygulanırlar. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - Bir veya daha fazla indeksleme kuralı ayarlayın. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - Varsa bir subgraph dağıtımını indekslemeye başlayın ve `decisionBasis` değerini `always` olarak ayarlayın, böylece İndeksleyici aracı her zaman onu indekslemeyi seçecektir. Genel kural her zaman olarak ayarlanırsa, ağdaki mevcut tüm subgraphlar indekslenecektir. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` - Bir dağıtımı indekslemeyi durdurun ve `decisionBasis` değerini never olarak ayarlayın, böylece indekslenecek dağıtımlara karar verirken bu dağıtımı atlayacaktır. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` — Bir dağıtım için `decisionBasis` öğesini `rules` olarak ayarlayın, böylece İndeksleyici aracısı bu dağıtımı indeksleyip indekslemeyeceğine karar vermek için indeksleme kurallarını kullanacaktır. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. 
-- `graph indexer action queue allocate ` - Kuyruk tahsis eylemi +- `graph indexer action queue allocate ` - Queue allocation action -- `graph indexer action queue reallocate ` - Kuyruk yeniden tahsis eylemi +- `graph indexer action queue reallocate ` - Queue reallocate action -- `graph indexer action queue unallocate ` - Kuyruk tahsis kaldırma eylemi +- `graph indexer action queue unallocate ` - Queue unallocate action -- `graph indexer actions cancel [ ...]` - id(kimlik) belirtilmemişse kuyruktaki tüm eylemleri iptal eder, aksi takdirde ayırıcı olarak boşluk içeren id dizisini iptal eder +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator -- `graph indexer actions approve [ ...]` - Yürütme için birden fazla eylemi onaylama +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution -- `graph indexer actions execute approve` - Çalışanı onaylanan eylemleri derhal gerçekleştirmeye zorlama +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -Çıktıda kuralları görüntüleyen tüm komutlar, `-output` argümanını kullanarak desteklenen çıktı formatları (`table`, `yaml`, and `json`) arasında seçim yapabilir. +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### İndeksleme kuralları +#### Indexing rules -İndeksleme kuralları genel varsayılanlar olarak ya da ID'leri kullanılarak belirli subgraph dağıtımları için uygulanabilir. Diğer tüm alanlar isteğe bağlı iken `deployment` ve `decisionBasis` alanları zorunludur. Bir indeksleme kuralı `decisionBasis` olarak `rules`'a sahipse, indeksleyici aracı bu kuraldaki boş olmayan eşik değerlerini ilgili dağıtım için ağdan alınan değerlerle karşılaştıracaktır. Subgraph dağıtımı, eşik değerlerden herhangi birinin üstünde (veya altında) değerlere sahipse, indeksleme için seçilecektir. +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -Örneğin, genel kuralın `minStake` değeri **5** (GRT) ise, kendisine 5 (GRT)'den fazla pay tahsis edilen tüm subgraph dağıtımları indekslenecektir. Eşik kuralları arasında `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, ve `minAverageQueryFees` yer alır. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
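As a sketch of how the `minStake` example above might be expressed with the `rules set` command described earlier (the key names follow the data model below; the value and key ordering are illustrative):

```sh
# Set a global default: let the agent decide per the threshold rules,
# indexing any deployment with more than 5 GRT of stake allocated to it
graph indexer rules set global decisionBasis rules minStake 5
```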
-Veri modeli: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -İndeksleme kuralı örnek kullanımı: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### Eylemler kuyruğu CLI +#### Actions queue CLI -Indexer-cli, eylem kuyruğu ile manuel olarak çalışmak için bir `actions` modülü sağlar. Eylem kuyruğu ile etkileşim kurmak için indeksleyici yönetim sunucusu tarafından barındırılan **Graphql API**'sini kullanır. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -Eylem yürütme çalışanı, yalnızca `ActionStatus = approved` değerine sahipse yürütmek için kuyruktan öğeleri alır. Önerilen yolda eylemler ActionStatus = queued ile kuyruğa eklenir, bu nedenle zincir üzerinde yürütülmeleri için onaylanmaları gerekir. Genel işleyiş şu şekilde olacaktır: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Kuyruğa üçüncü şahıs optimizasyon aracı veya indexer-cli kullanıcısı tarafından eklenen eylem -- İndeksleyici, sıraya alınan tüm eylemleri görüntülemek için `indexer-cli`'yi kullanabilir -- İndeksleyici (veya diğer yazılımlar) `indexer-cli` kullanarak kuyruktaki eylemleri onaylayabilir veya iptal edebilir. Onaylama ve iptal etme komutları girdi olarak bir dizi eylem kimliği alır. -- Yürütme çalışanı, onaylanan eylemler için kuyruğu düzenli olarak tarar. Kuyruktan`approved` eylemleri alır, bunları yürütmeye çalışır ve yürütme durumuna bağlı olarak db'deki değerleri `success` veya `failed` olarak günceller. -- Eğer bir eylem başarılı olursa, çalışan, aracı `auto` veya `oversight` modunda manuel eylemler gerçekleştirirken yararlı olacak şekilde, aracıya tahsisi ileriye dönük olarak nasıl yöneteceğini söyleyen bir indeksleme kuralının mevcut olmasını sağlayacaktır. -- İndeksleyici, eylem yürütme geçmişini görmek için eylem kuyruğunu izleyebilir ve gerekirse yürütmede başarısız olan eylem öğelerini yeniden onaylayabilir ve güncelleyebilir. Eylem kuyruğu, kuyruğa alınan ve gerçekleştirilen tüm eylemlerin bir geçmiş kaydını sağlar. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. 
+- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. -Veri modeli: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Kaynaktan kullanım örneği: +Example usage from source: ```bash graph indexer actions get all @@ -677,114 +677,114 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Tahsis yönetimi için desteklenen eylem türlerinin farklı girdi gereksinimleri olduğunu unutmayın: +Note that supported action types for allocation management have different input requirements: -- `Allocate` - stake'i belirli bir subgraph dağıtımına tahsis eder +- `Allocate` - allocate stake to a specific subgraph deployment - - gerekli eylem parametreleri: + - required action params: - deploymentID - amount -- `Unallocate` - tahsisi kapatır, stake'i başka bir yere yeniden tahsis etmek için serbest bırakır +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - gerekli eylem parametreleri: + - required action params: - allocationID - deploymentID - - opsiyonel eylem parametreleri: + - optional action params: - poi - - force (graph-node'un sağladığıyla uyuşmasa bile sağlanan POI'yi kullanmaya zorlar) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - tahsisi atomik olarak kapatır ve aynı subgraph dağıtımı için yeni bir tahsis açar +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - gerekli eylem parametreleri: + - required action params: - allocationID - deploymentID - amount - - opsiyonel eylem parametreleri: + - optional action params: - poi - - force (graph-node'un sağladığıyla uyuşmasa bile sağlanan POI'yi kullanmaya zorlar) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Maliyet modelleri +#### Cost models -Maliyet modelleri, pazar ve sorgu niteliklerine dayalı olarak sorgular için dinamik fiyatlandırma sağlar. İndeksleyici Hizmeti, sorgulara yanıt vermeyi amaçladıkları her bir subgraph için ağ geçitleriyle bir maliyet modeli paylaşır. Ağ geçitleri de sorgu başına İndeksleyici seçim kararları vermek ve seçilen İndeksleyicilerle ödeme pazarlığı yapmak için maliyet modelini kullanır. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Agora dili, sorgular için maliyet modellerini bildirmek için esnek bir format sağlar. Agora fiyat modeli, bir GraphQL sorgusundaki her üst düzey sorgu için sırayla çalışan bir deyimler serisidir. Her üst düzey sorgu için, onunla eşleşen ilk ifade o sorgunun fiyatını belirler. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. 
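A minimal sketch of that top-to-bottom ordering, reusing the statement shapes from the example model below (the prices are illustrative, and `indexer cost set model` is the command shown under "Applying the cost model"):

```sh
# Write a two-statement model: the specific `skip` match is tried first,
# and any query it does not match falls through to `default`
cat > ordering_example.agora <<'EOF'
query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip;
default => 0.1;
EOF
indexer cost set model ordering_example.agora
```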
-Bir ifade, GraphQL sorgularını eşleştirmek için kullanılan bir evet-hayır sorusundan ve değerlendirildiğinde GRT cinsinden ondalık maliyet çıktısı veren bir maliyet ifadesinden oluşur. Bir sorgunun adlandırılmış argüman konumundaki değerleri evet-hayır sorusunda yakalanabilir ve ifadede kullanılabilir. Ayrıca, genel değerler de ayarlanabilir ve bir ifadedeki yer tutucuların yerine kullanılabilir. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Örnek maliyet modeli: +Example cost model: ``` -# Bu ifade atlama değerini yakalar, -# `skip` kullanan belirli sorguları eşleştirmek için evet-hayır sorusunda bir boolean ifadesi kullanır -# `skip` değerine ve SYSTEM_LOAD genel değerine dayalı olarak maliyeti hesaplamak için bir maliyet ifadesi +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; -# Bu varsayılan, herhangi bir GraphQL ifadesiyle eşleşecektir. -# Maliyeti hesaplamak için ifadenin içine yerleştirilmiş bir Global (genel) kullanır +# This default will match any GraphQL expression. +# It uses a Global substituted into the expression to calculate cost default => 0.1 * $SYSTEM_LOAD; ``` -Yukarıdaki modeli kullanarak örnek sorgu maliyetlendirmesi: +Example query costing using the above model: -| Sorgu | Fiyat | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Maliyet modelinin uygulanması +#### Applying the cost model -Maliyet modelleri, onları veritabanında saklanmak üzere İndeksleyici aracısının İndeksleyici Yönetim API'sine aktaran İndeksleyici CLI aracılığıyla uygulanır. İndeksleyici Hizmeti daha sonra bunları alır ve maliyet modellerini istedikleri zaman ağ geçitlerine sunar. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Ağ ile etkileşim kurma +## Interacting with the network -### Protokolde stake +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -Bir İndeksleyici protokolde GRT'yi stake ettikten sonra, [İndeksleyici kompenetleri ](/indexing/overview/#indexer-components) başlatılabilir ve ağ ile etkileşimlerine başlayabilir. 
+Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Tokenleri onaylama +#### Approve tokens -1. [Remix uygulamasını](https://remix.ethereum.org/) bir tarayıcıda açın +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. `File Explorer`'da [ABI token](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json) ile **GraphToken.abi** adında bir dosya oluşturun. +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. Ortam altında `Injected Web3`'ü seçin ve `Account` altında İndeksleyici adresinizi seçin. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. GraphToken sözleşme adresini ayarlayın - GraphToken sözleşme adresini (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) `At Address` seçeneğinin yanına yapıştırın ve uygulamak için `At address` düğmesine tıklayın. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Staking sözleşmesini onaylamak için `approve(spender, amount)` fonksiyonunu çağırın. `spender`'ı Staking sözleşmesi adresiyle (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) ve `amount`'ı stake edilecek tokenlarla (wei cinsinden) doldurun. +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Tokenleri stake et +#### Stake tokens -1. [Remix uygulamasını](https://remix.ethereum.org/) bir tarayıcıda açın +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. `File Explorer`'da, Staking ABI ile **Staking.abi** adında bir dosya oluşturun. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. Ortam altında `Injected Web3`'ü seçin ve `Account` altında İndeksleyici adresinizi seçin. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Stake sözleşmesi adresini ayarlayın - Stake sözleşmesi adresini (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) `At address` seçeneğinin yanına yapıştırın ve uygulamak için `At Address` düğmesine tıklayın. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. GRT'yi protokolde stake etmek için `stake()` fonksiyonunu çağırın. +6. Call `stake()` to stake GRT in the protocol. -7. (Opsiyonel) İndeksleyiciler, fonları kontrol eden anahtarları subgraphlar'da tahsis etme ve (ücretli) sorgular sunma gibi günlük eylemleri gerçekleştiren anahtarlardan ayırmak için başka bir adresi kendi İndeksleyici altyapıları için operatör olarak onaylayabilirler. Operatörü ayarlamak için operatör adresi ile `setOperator()` fonksiyonunu çağırın. +7. 
(Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address.

-8. ( Opsiyonel) Ödüllerin dağıtımını kontrol etmek ve Delegatörleri stratejik olarak cezbetmek için İndeksleyiciler, indexingRewardCut (milyon başına parça), queryFeeCut (milyon başına parça) ve cooldownBlocks (blok sayısı) değerlerini güncelleyerek delegasyon parametrelerini güncelleyebilirler. Bunu yapmak için `setDelegationParameters()` fonksiyonunu çağırın. Aşağıdaki örnek queryFeeCut değerini sorgu indirimlerinin %95'ini İndeksleyiciye ve %5'ini Delegatörlere dağıtacak şekilde ayarlar, indexingRewardCut değerini indeksleme ödüllerinin %60'ını İndeksleyiciye ve %40'ını Delegatörlere dağıtacak şekilde ayarlar ve `thecooldownBlocks` süresini 500 blok olarak ayarlar.
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

@@ -806,12 +806,12 @@ To set the delegation parameters using Graph Explorer interface, follow these st

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### Bir tahsisin ömrü
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).
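As a sketch of closing an allocation by hand through the actions queue described earlier (the IDs are placeholders, the argument order is assumed from the `action queue unallocate` entry above, and `approve`/`execute approve` are used exactly as documented):

```sh
# Queue the close, then approve and execute it
graph indexer action queue unallocate <deployment-id> <allocation-id>
graph indexer actions approve 1
graph indexer actions execute approve
```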
-İndeksleyicilerin, zincir üstünde tahsis oluşturmadan önce subgraph dağıtımlarını chainhead ile senkronize etmek için zincir dışı senkronizasyon fonksiyonunu kullanmaları önerilir. Bu özellik bilhassa senkronize edilmesi 28 dönemden daha uzun sürebilecek veya belirsiz bir şekilde başarısız olma ihtimali olan subgraphlar için kullanışlıdır. +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 3f7d73663ffbffed73713ea062f4c0f5080938d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:49 -0500 Subject: [PATCH 0084/1534] New translations overview.mdx (Ukrainian) --- website/src/pages/uk/indexing/overview.mdx | 424 ++++++++++----------- 1 file changed, 212 insertions(+), 212 deletions(-) diff --git a/website/src/pages/uk/indexing/overview.mdx b/website/src/pages/uk/indexing/overview.mdx index 5f5804ddf9d4..42a312cb5caa 100644 --- a/website/src/pages/uk/indexing/overview.mdx +++ b/website/src/pages/uk/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Індексація +title: Indexing Overview +sidebarTitle: Overview --- Індексатори - це оператори нод у The Graph Network, які стейкають токени Graph (GRT), щоб надавати послуги з індексування та обробки запитів. За свої послуги індексатори отримують плату за запити та винагороду за індексацію. Вони також заробляють комісію за запити, яка повертається відповідно до експоненціальної функції компенсації. @@ -8,46 +9,46 @@ GRT, які застейкані в протоколі, підлягають п Індексатори вибирають підграфи для індексування на основі сигналу від кураторів, де куратори стейкають GRT, щоб вказати, які підграфи є якісними та мають бути пріоритетними. Споживачі (наприклад, додатки) також можуть задавати параметри, за якими індексатори обробляють запити до їхніх підграфів, і встановлювати налаштування щодо оплати за запити. -## Поширені запитання +## FAQ -### Яка мінімальна кількість GRT необхідна для того, щоб стати індексатором в мережі? +### What is the minimum stake required to be an Indexer on the network? -Мінімальна кількість для індексатора наразі встановлена на рівні 100 тис. GRT. +The minimum stake for an Indexer is currently set to 100K GRT. -### Які джерела доходу для індексатора? +### What are the revenue streams for an Indexer? -**Отримання комісії за опрацювання запитів** - Платежі за обслуговування запитів у мережі. Ці платежі здійснюються через відповідні канали між індексатором та сіткою. Кожен запит містить платіж, а відповідна відповідь - доказ правдивості результату запиту. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**Винагорода за індексацію** - Винагорода за індексацію, що генерується шляхом 3% річної інфляції в масштабі всього протоколу, розподіляється серед індексаторів, які індексують розгортання підграфів для мережі. +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### Як розподіляються винагороди за індексацію? +### How are indexing rewards distributed? 
-Винагорода за індексацію надходить від інфляції протоколу, яка встановлена на рівні 3% річної емісії. Вони розподіляються між підграфами на основі частки всіх кураторських сигналів на кожному, а потім розподіляються пропорційно між індексаторами на основі їхнього виділеного стейку на відповідному підграфі. **Щоб мати право на винагороду, розподіл має бути закрито за допомогою доказу індексації (proof of indexing - POI), яке відповідає стандартам, установленим арбітражним регламентом.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### Що за доказ індексації (POI)? +### What is a proof of indexing (POI)? -POI використовуються в мережі для перевірки того, що Індексатор індексує підграфи, на які вони були розподілені. POI для першого блоку поточної епохи має бути надісланий при закритті розподілу, щоб цей розподіл мав право на винагороду за індексацію. POI для блоку - це дайджест усіх транзакцій сховища об'єктів для певного розгортання підграфів до цього блоку включно. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### Коли розподіляються винагороди за індексацію? +### When are indexing rewards distributed? -Винагороди безперервно накопичуються, поки алокації активні та розподілені протягом 28 періодів. Винагороди збираються Індексаторами та розподіляються щоразу, коли їхні розподіли закриваються. Це відбувається або вручну, коли Індексатор хоче примусово закрити їх, або після 28 епох, делегат може закрити розподіл для Індексатора, але це не призводить до отримання винагороди. 28 епох - це максимальний час роботи розподілів (зараз одна епоха триває ~24 години). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### Чи можна відстежувати винагороди за індексацію, що очікують на розгляд? +### Can pending indexing rewards be monitored? 
The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -Багато інформаційних панелей, створених спільнотою, містять очікувані значення винагород, і їх можна легко перевірити вручну, виконавши ці кроки: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: 1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql -запит indexerAllocations { - indexer(id: "") { { } }) { indexer(id: "") +query indexerAllocations { + indexer(id: "") { allocations { activeForIndexer { - allocations { id + allocations { id } } @@ -56,129 +57,128 @@ The RewardsManager contract has a read-only [getRewards](https://github.com/grap } ``` -Використовуйте Etherscan для виклику `getRewards()`: +Use Etherscan to call `getRewards()`: -- Перейдіть до [EtherScan інтерфейсу, потім до контракту Rewards](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* Оберіть `getRewards()`: +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - - Введіть **allocationID** у вхідних даних. - - Натисніть кнопку **Query**. + - Enter the **allocationID** in the input. + - Click the **Query** button. -### Що таке спори (disputes) та де я можу їх переглянути? +### What are disputes and where can I view them? -Запити індексатора та розподіли можуть бути оскаржені на Graph протягом відповідного періоду оскарження. Цей період варіюється в залежності від типу спору. Для запитів/атестацій вікно спору триває 7 епох, тоді як для розподілів - 56. Після закінчення цих періодів спори не можуть бути відкриті ні проти розподілів, ні проти запитів. При відкритті спору учасник повинен внести депозит у розмірі не менше 10 000 GRT, який буде заблокований до завершення спору і винесення рішення по ній. Fisherman (рибалка) - це будь-який учасник мережі, який відкриває спори. +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. -Спори мають **три** можливих результати, так само як і депозит учасників. +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. -- Якщо спір буде відхилено, GRT, внесений в якості депозиту, буде спалено, а Індексатор не буде порізаний. -- Якщо суперечка буде вирішена внічию, депозит користувача буде повернуто, а індексатора не буде порізано. -- Якщо спір буде задоволено, GRT, внесений учасником, буде повернуто, Індексатора буде порізано, а рибалка отримає 50% від GRT, які були порізані. 
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -Спори можна переглянути в інтерфейсі на сторінці профілю індексатора у вкладці `Disputes`. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -### Що за комісії за опрацювання запитів і коли вони розподіляються? +### What are query fee rebates and when are they distributed? Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Після закриття розподілу виплати можуть бути отримані індексатором. Після клейму, комісії за запити розподіляються між індексатором та його делегатами на основі зниження цін за запити та експоненціальної функції повернень. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### Що таке query fee cut і indexing reward cut? +### What is query fee cut and indexing reward cut? -Значення `queryFeeCut` і `indexingRewardCut` є параметрами делегування, які Індексатор може встановлювати разом із cooldownBlocks, щоб контролювати розподіл GRT між Індексатором і його Делегатами. Перегляньте останні кроки в розділі [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol), щоб отримати інструкції щодо встановлення параметрів делегування. +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - відсоток від комісій за опрацювання запитів, який буде розподілено між Індексатором та Делегатами. Якщо цей параметр встановлено на рівні в 95%, індексатор отримає 95% від комісій за запити, зароблених при закритті розподілу, а решта 5% підуть Делегатам. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - відсоток від винагород за індексацію, який буде розподілено між Індексатором та Делегатами. Якщо цей параметр встановлено на рівні в 95%, індексатор отримає 95% винагороди за індексацію, коли розподіл буде закрито, а делегати розділять між собою решту 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. 
If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### Як індексатори знають, які підграфи індексувати? +### How do Indexers know which subgraphs to index? -Індексатори можуть відрізнятися один від одного, застосовуючи передові методи для прийняття рішень щодо індексування підграфів, але для того, щоб дати загальне уявлення, ми обговоримо кілька ключових метрик, які використовуються для оцінки підграфів у мережі: +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -- **Сигнали від кураторів ** - якщо велика частка від загальної кількості сигналів у мережі припадає на певний підграф, то це є хорошим показником інтересу до цього підграфа, особливо під час фази бутстрапу, коли обсяг запитів зростає. +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **Збори за запити** - Історичні дані про обсяг зборів за запити, зібрані для певного підграфа, є хорошим індикатором майбутнього попиту. +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **Кількість застейканих токенів** - Спостереження за поведінкою інших індексаторів або аналіз пропорцій від загального стейку токенів, виділених на конкретні підграфи, може дозволити індексатору відстежувати попит на запити до підграфів, щоб виявити підграфи, яким мережа довіряє, або підграфи, які можуть показати потребу в більшій кількості токенів. +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **Підграфи без винагороди за індексування** - Деякі підграфи не отримують винагороди за індексування переважно через те, що вони використовують непідтримувані можливості, такі як IPFS, або тому, що вони запитують іншу мережу за межами основної мережі. Ви побачите повідомлення про те, що підграф не генерує винагороду за індексацію. +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -### Які вимоги до апаратного обладнання? +### What are the hardware requirements? -- **Small** - достатній для початку індексування декількох підграфів, ймовірно, потрібно буде розширити. -- **Standard** - налаштування за замовчуванням, це те, що використовується у прикладі маніфестів розгортання k8s/terraform. -- **Medium** - продуктивний індексатор, що підтримує 100 підграфів і 200-500 запитів на секунду. -- **Large** - підготовлений для індексації всіх підграфів, що використовуються наразі, і обслуговування запитів на відповідний трафік. +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. 
+- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Налаштування | Postgres
(CPU) | Postgres
(пам'ять в GB) | Postgres
(диск у ТБ) | VMs
(Центральні CPU) | VMs
(пам'ять у ГБ) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -### Яких основних заходів безпеки повинен дотримуватися індексатор? +### What are some basic security precautions an Indexer should take? -- **Operator wallet**. Налаштування гаманця оператора є важливим запобіжним заходом, оскільки він дозволяє Індексатору підтримувати відокремлення між своїми ключами, які контролюють застейкані токени, і тими, які використовуються для щоденних операцій. Інструкції див. у розділі [Stake in Protocol](/indexing/overview/#stake-in-the-protocol). +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **Firewall** - Публічно доступною має бути лише сервіс-індексатор, і особливу увагу слід приділити блокуванню портів адміністратора і доступу до бази даних: не слід відкривати кінцеву точку JSON-RPC Graph Node (порт за замовчуванням: 8030), кінцеву точку API управління індексатором (порт за замовчуванням: 18000) і кінцеву точку бази даних Postgres (порт за замовчуванням: 5432). +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## Інфраструктура +## Infrastructure -Центром інфраструктури індексатора є Graph Node, яка відстежує індексовані мережі, вибирає і завантажує дані відповідно до визначення підграфів і слугує як [GraphQL API](/about/#how-the-graph-works). Graph Node має бути підключена до кінцевої точки, яка надає дані з кожної проіндексованої мережі; ноди IPFS для отримання даних; бази даних PostgreSQL для їх зберігання; і компонентів індексатора, які полегшують його взаємодію з мережею. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL database** - основне сховище для Graph Node, саме тут зберігаються дані підграфів. Сервіс-індексатор та агент також використовують базу даних для зберігання даних каналів стану, моделей витрат, правил індексації та дій з розподілу. +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. -- **Data endpoint** - Для EVM-сумісних мереж Graph Node має бути підключена до кінцевої точки, який надає EVM-сумісний JSON-RPC API. Це може бути як один клієнт, так і більш складне налаштування, яке розподіляє навантаження між кількома. 
Важливо знати, що певні підграфи потребують особливих можливостей клієнта, таких як режим архівування та/або API відстеження парності. +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **Нода IPFS (версія менше ніж 5)** - метадані розгортання підграфів зберігаються у мережі IPFS. Graph Node звертається до вузла IPFS під час розгортання підграфів, щоб отримати маніфест підграфів і всі пов'язані файли. Індексаторам в мережі не потрібно запускати власну ноду IPFS, ноду IPFS для мережі розміщено на https://ipfs.network.thegraph.com. +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. -- **Indexer service** - виконує всі необхідні зовнішні комунікації з мережею. Обмінюється моделями витрат і статусами індексації, передає запити від шлюзів до Graph Node, а також керує оплатою запитів через відповідні канали зі шлюзом. +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - полегшує взаємодію індексаторів в мережі, включаючи реєстрацію у мережі, керування розгортанням підграфів у Graph Node/-ах та керуванням розподілами. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **Сервер метрик Prometheus** - Компоненти Graph Node та індексаторів реєструють свої метрики на відповідному на сервері. +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -Примітка: Для підтримки гнучкого масштабування рекомендується розділити завдання запитів та індексації між різними наборами нод: нодами запитів та нодами індексації. +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -### Огляд портів +### Ports overview -> **Важливо**: Будьте обережні з публічним відкриттям портів - **адміністративні порти** слід тримати закритими. Це стосується і JSON-RPC Graph Node та кінцевих точок керування індексатором, описаних нижче. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Graph Node -| Порт | Призначення | Розташування | Аргумент CLI | Перемінна оточення | -| --- | --- | --- | --- | --- | -| 8000 | HTTP-сервер GraphQL
(для запитів до підграфів) | /subgraphs/id/...
/subgraphs/name/.../... | --http-порт | - | -| 8001 | GraphQL WS
(для підписок на підграфи) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(для керування розгортаннями) | / | --admin-port | - | -| 8030 | API стану індексації підграфів | /graphql | --index-node-port | - | -| 8040 | Метрики Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Служба індексації +#### Indexer Service -| Порт | Призначення | Розташування | Аргумент CLI | Перемінна оточення | -| --- | --- | --- | --- | --- | -| 7600 | HTTP-сервер GraphQL
(для платних запитів до підграфів) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Метрики Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Агент індексації +#### Indexer Agent -| Порт | Призначення | Розташування | Аргумент CLI | Перемінна оточення | -| --- | --- | --- | --- | --- | -| 8000 | API для керування індексатором | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Налаштування серверної інфраструктури з використанням Terraform на Google Cloud +### Setup server infrastructure using Terraform on Google Cloud -> Примітка: Індексатори можуть альтернативно використовувати AWS, Microsoft Azure або Alibaba. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Встановіть необхідні умови +#### Install prerequisites - Google Cloud SDK -- Kubectl - інструмент командного рядка +- Kubectl command line tool - Terraform -#### Створіть проєкт на Google Cloud +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Query fees are collected by the gateway and distributed to indexers according to cd terraform ``` -- Авторизуйтесь у Google Cloud і створіть новий проєкт. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Використовуйте сторінку виставлення рахунків у Google Cloud Console, щоб увімкнути виставлення рахунків для нового проєкту. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Створіть конфігурацію Google Cloud. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Увімкніть необхідні Google Cloud API. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Створіть обліковий запис сервісу. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Увімкніть взаємодію між базою даних і кластером Kubernetes, який буде створено на наступному кроці. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,35 +249,35 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Створіть мінімальний конфігураційний файл terraform (оновлюйте за потреби). +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars <, щоб він вказував на каталог k8s/base`. +- Copy the directory `k8s/overlays` to a new directory `$dir,` and adjust the `bases` entry in `$dir/kustomization.yaml` so that it points to the directory `k8s/base`. -- Прочитайте всі файли в `$dir` і змініть будь-які значення, як зазначено в коментарях. 
+- Read through all the files in `$dir` and adjust any values as indicated in the comments.

-Установіть усі ресурси за допомогою `kubectl apply -k $dir`.
+Deploy all resources with `kubectl apply -k $dir`.

### Graph Node

[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint.

-#### Початок роботи з базового коду
+#### Getting started from source

-#### Встановіть необхідні умови
+#### Install prerequisites

- **Rust**

- **PostgreSQL**

- **IPFS**

-- **Додаткові вимоги для користувачів Ubuntu** - Для запуску Graph Node на Ubuntu може знадобитися декілька додаткових програм.
+- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed.

```sh
sudo apt-get install -y clang libpq-dev libssl-dev pkg-config
```

-#### Налаштування
+#### Setup

-1. Запуск сервера бази даних PostgreSQL
+1. Start a PostgreSQL database server

```sh
initdb -D .postgres
pg_ctl -D .postgres -l logfile start
createdb graph-node
```

-2. Клонуйте [Graph Node](https://github.com/graphprotocol/graph-node) репозиторій і створіть базовий код, запустивши `cargo build`
+2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build`

-3. Тепер, коли всі необхідні складові налаштовано, запустіть Graph Node:
+3. Now that all the dependencies are set up, start the Graph Node:

```sh
cargo run -p graph-node --release -- \
  --postgres-url postgresql://[USERNAME[:PASSWORD]]@localhost:5432/graph-node \
  --ethereum-rpc [NETWORK_NAME]:[URL] \
  --ipfs https://ipfs.network.thegraph.com
```

-#### Початок роботи з Docker
+#### Getting started using Docker

-#### Передумови
+#### Prerequisites

-- **Нода Ethereum**. За замовчуванням, docker compose використовуватиме основну мережу: [http:// host.docker.internal:8545](http://host.docker.internal:8545) для підключення до ноди Ethereum на вашій основній машині. Ви можете замінити це ім’я та Url-адресу мережі, оновивши `docker-compose.yaml`.
+- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`.

-#### Налаштування
+#### Setup

-1. Клонуйте Graph Node і перейдіть до каталогу Docker:
+1. Clone Graph Node and navigate to the Docker directory:

```sh
git clone https://github.com/graphprotocol/graph-node
cd graph-node/docker
```

-2. Лише для користувачів Linux – використовуйте IP-адресу хоста замість `host.docker.internal` у `docker-compose.yaml` за допомогою доданого скрипта:
+2. For Linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yaml` using the included script:

```sh
./setup.sh
```

-3. Запустіть локальну Graph Node, яка буде підключена до вашої кінцевої точки Ethereum:
+3.
Start a local Graph Node that will connect to your Ethereum endpoint: ```sh docker-compose up ``` -### Компоненти індексатора +### Indexer components -Для успішної участі в мережі потрібен майже постійний моніторинг і взаємодію, тому ми створили набір додатків Typescript для полегшення участі в мережі індексаторів. Існує три компоненти Індексатора: +To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **Indexer agent** - Агент відстежує мережу та власну інфраструктуру індексатора і керує розгортаннями підграфів, які індексуються та розподіляються в мережі, а також визначає, скільки ресурсів виділяється для кожного з них. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **Indexer service** - Єдиний компонент, який потрібно виставляти назовні, сервіс передає запити підграфів до graph node, керує каналами стану для оплати запитів, ділиться важливою інформацією для прийняття рішень з клієнтами, такими як шлюзи. +- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. -- **Indexer CLI** - інтерфейс командного рядка для керування агентом індексатора. Він дозволяє індексаторам керувати моделями витрат, ручним розподілом, чергою дій та правилами індексування. +- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. -#### Початок роботи +#### Getting started The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/indexing/overview/#stake-in-the-protocol) before starting up your Indexer components! -#### З NPM-пакетів +#### From NPM packages ```sh npm install -g @graphprotocol/indexer-service @@ -398,7 +398,7 @@ graph indexer connect http://localhost:18000/ graph indexer ... ``` -#### З базового коду +#### From source ```sh # From Repo root directory @@ -418,16 +418,16 @@ cd packages/indexer-cli ./bin/graph-indexer-cli indexer ... ``` -#### Використання docker +#### Using docker -- Витягнути images з реєстру +- Pull images from the registry ```sh docker pull ghcr.io/graphprotocol/indexer-service:latest docker pull ghcr.io/graphprotocol/indexer-agent:latest ``` -Або створіть images локально з базового коду +Or build images locally from source ```sh # Indexer service @@ -442,22 +442,22 @@ docker build \ -t indexer-agent:latest \ ``` -- Запустіть компоненти +- Run the components ```sh docker run -p 7600:7600 -it indexer-service:latest ... docker run -p 18000:8000 -it indexer-agent:latest ... ``` -**ПРИМІТКА**. 
Після запуску контейнерів сервіс-індексатора повинен бути доступний за адресою [http://localhost:7600](http://localhost:7600), а агент індексатора повинен виставляти API управління індексатором за адресою [http://localhost:18000/](http://localhost:18000/). +**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). -#### Використання K8 та Terraform +#### Using K8s and Terraform -Див. розділ [Налаштування серверної інфраструктури з використанням Terraform на Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) +See the [Setup Server Infrastructure Using Terraform on Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) section -#### Використання +#### Usage -> **ПРИМІТКА**. Усі змінні конфігурації середовища виконання можна застосовувати як параметри до команди під час запуску або за допомогою змінних середовища у форматі `COMPONENT_NAME_VARIABLE_NAME`(наприклад, `INDEXER_AGENT_ETHEREUM`). +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). #### Indexer agent @@ -516,56 +516,56 @@ graph-indexer-service start \ #### Indexer CLI -Indexer CLI — це плагін для [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) доступного у терміналі в `graph indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Керування індексатором за допомогою Indexer CLI +#### Indexer management using Indexer CLI -Пропонованим інструментом для взаємодії з **API керування індексатором** є **Indexer CLI**, розширення **Graph CLI**. Агенту потрібні дані від індексатора, щоб автономно взаємодіяти з мережею від імені індексатора. Механізмом визначення поведінки агента індексатора є режим **керування розподілом** і **правила індексування**. У автоматичному режимі індексатор може використовувати **правила індексування**, щоб застосувати свою конкретну стратегію вибору підграфів для індексування та обслуговування запитів. Правила керуються через GraphQL API, який обслуговує агент і відомий як Indexer Management API. У ручному режимі індексатор може створювати дії розподілу за допомогою **черги дій** і явно затверджувати їх перед виконанням. У режимі нагляду **правила індексування** використовуються для заповнення **черги дій** і також вимагають явного схвалення для виконання. +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. 
Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Використання +#### Usage -**Indexer CLI** з'єднується з агентом індексатора, як правило, за допомогою переадресації портів, тому CLI не потрібно запускати на тому ж сервері або кластері. Щоб допомогти вам розпочати роботу і надати деякий контекст, тут буде коротко описано CLI. +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `graph indexer connect ` – приєднатися до API керування індексатором. Зазвичай підключення до сервера відкривається через перенаправлення портів, тому CLI можна легко керувати віддалено. (Приклад: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - отримайте одне або кілька правил індексування, використовуючи `all` як ``, щоб отримати всі правила, або `global`, щоб отримати глобальні значення за замовчуванням. Додатковий аргумент `--merged` можна використовувати, щоб вказати, що правила розгортання об’єднуються з глобальним правилом. Ось як вони застосовуються в агенті індексатора. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - задати одне або декілька правил індексування. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` – почніть індексувати розгортання підграфа, якщо доступно, і встановіть для його `decisionBasis` значення `always`, тому агент індексатора завжди вирішить індексувати його. Якщо для глобального правила встановлено значення завжди, усі доступні підграфи в мережі будуть проіндексовані. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` – зупиніть індексацію розгортання та встановіть для параметра `decisionBasis` значення «never», тому він пропустить це розгортання при прийнятті рішення про те, які розгортання індексувати. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` — установіть `decisionBasis` для розгортання на `rules`, щоб агент індексатора використовував правила індексування, щоб вирішити, чи індексувати це розгортання. 
+- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. -- `graph indexer action queue allocate ` – розподіл черги +- `graph indexer action queue allocate ` - Queue allocation action -- `graph indexer action queue reallocate ` – перерозподіл черги +- `graph indexer action queue reallocate ` - Queue reallocate action -- `graph indexer action queue unallocate ` – скасування розподілу черги +- `graph indexer action queue unallocate ` - Queue unallocate action -- `graph indexer actions cancel [ ...]` - скасувати всі дії в черзі, якщо ідентифікатор не вказано, інакше скасувати масив ідентифікаторів із пробілом у якості розмежувача +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator -- `graph indexer actions approve [ ...]` - затвердити кілька дій для виконання +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution -- `graph indexer actions execute approve` - змусити виконавця негайно виконати затверджені дії +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -Усі команди, які відображають правила у виводі, можуть вибирати між підтримуваними форматами виводу (`table`, `yaml` та `json`) за допомогою `- output` аргументу. +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### Правила індексації +#### Indexing rules -Правила індексування можуть бути застосовані як глобальні за замовчуванням або для конкретних розгортань підграфів, використовуючи їхні ідентифікатори. Поля `deployment` і `decisionBasis` є обов’язковими, тоді як усі інші поля необов’язкові. Якщо правило індексування має `rules` як `decisionBasis`, тоді агент індексатора порівнює ненульові порогові значення цього правила зі значеннями, отриманими з мережі для відповідного розгортання. Якщо розгортання підграфа має значення вище (або нижче) будь-якого з порогів, його буде вибрано для індексування. +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -Наприклад, якщо глобальне правило має значення `minStake` в **5** (GRT), будь-яке розгортання підграфа, що має 5 (GRT) в стейкінгу, буде проіндексоване. Порогові правила включають `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, та `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
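To make these thresholds concrete, here is a minimal sketch of a threshold-driven configuration using the `rules set` command described above; the values are illustrative, not tuned recommendations:

```bash
# Let the agent decide based on rule thresholds by default
graph indexer rules set global decisionBasis rules

# Index any deployment with at least 5 GRT of stake allocated to it
# or at least 1000 GRT of curation signal
graph indexer rules set global minStake 5 minSignal 1000
```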
-Модель даних: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -Приклад використання правила індексації: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -613,18 +613,18 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK #### Actions queue CLI -indexer-cli надає модуль `actions` для ручної роботи з чергою дій. Для взаємодії з чергою дій він використовує **Graphql API**, розміщений на сервері керування індексатором. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -Виконавець дії буде брати елементи з черги на виконання, тільки якщо вони мають `ActionStatus = accepted`. У рекомендованому шляху дії додаються до черги зі статусом ActionStatus = queued, тому вони повинні бути схвалені, щоб бути виконаними в мережі. Загальний потік буде виглядати так: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- Дія, додана до черги стороннім оптимізатором або користувачем indexer-cli -- Індексатор може використовувати `indexer-cli` для перегляду всіх дій у черзі -- Індексатор (або інша програма) може затверджувати або скасовувати дії у черзі за допомогою `indexer-cli`. Команди затвердження та скасування приймають на вхід масив ідентифікаторів дій. -- Виконавець регулярно проводить опитування черги на предмет схвалення дій. Він бере `approved` дії з черги, пробує виконати їх і потім оновлює значення в db в залежності від статусу виконання до `success` або `failed`. -- Якщо дія успішна, виконавець забезпечить наявність правила індексації, яке підкаже агенту, як керувати розподілом далі, що корисно при виконанні ручних дій під час перебування агента в режимі `auto` або ` oversight`. -- Індексатор може стежити за чергою дій, щоб бачити історію виконання дій і при необхідності повторно затверджувати та оновлювати елементи дій, якщо вони не були виконані. У черзі дій відображається історія всіх дій, поставлених у чергу і виконаних. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
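To illustrate the re-approval step at the end of this flow, a short sketch follows; the action IDs are placeholders, and the `--status` filter is the one documented above:

```bash
# List actions that failed execution
graph indexer actions get --status failed

# Re-approve them by id and have the worker retry them onchain
graph indexer actions approve 8 9
graph indexer actions execute approve
```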
-Модель даних: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -Приклад використання: +Example usage from source: ```bash graph indexer actions get all @@ -677,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -Зверніть увагу, що підтримувані типи дій для управління розподілами мають різні вимоги до вхідних даних: +Note that supported action types for allocation management have different input requirements: -- `Allocate` - розподілити стейк на конкретне розгортання підграфа +- `Allocate` - allocate stake to a specific subgraph deployment - - необхідні параметри: + - required action params: - deploymentID - amount -- `Unallocate` - закритий розподіл, що звільняє стейк для перерозподілу в інше місце +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - необхідні параметри: + - required action params: - allocationID - deploymentID - - необов'язкові параметри: + - optional action params: - poi - - force (змушує використовувати наданий POI, навіть якщо він не збігається з тим, що надає graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - автоматично закриваємо розподіл і відкриває новий для того ж розгортання підграфа +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - необхідні параметри: + - required action params: - allocationID - deploymentID - amount - - необов'язкові параметри: + - optional action params: - poi - - force (змушує використовувати наданий POI, навіть якщо він не збігається з тим, що надає graph-node) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Моделі витрат +#### Cost models -Моделі витрат забезпечують динамічне ціноутворення для запитів на основі атрибутів ринку і запиту. Сервіс-індексатора ділиться моделлю вартості зі шлюзами для кожного підграфа, для якого вони мають намір відповідати на запити. Шлюзи, і собі, використовують модель вартості для прийняття рішень про вибір індексатора для кожного запиту і для обговорень про оплату з обраними індексаторами. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Мова Agora надає гнучкий формат для оголошення цінових моделей для запитів. Цінова модель Agora - це послідовність операторів, які виконуються по черзі для кожного запиту верхнього рівня в запиті GraphQL. Для кожного запиту верхнього рівня перший оператор, який йому відповідає, визначає ціну для цього запиту. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -Оператор складається з параметра, який використовується для зіставлення запитів GraphQL, і виразу вартості, який при обчисленні виводить вартість у десяткових GRT. Значення в позиції іменованого аргументу запиту можуть бути перехоплені в пропозицію і використані у виразі. Globals також можна встановлювати та підставляти замість символів-замінників у виразі. 
+A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Приклад моделі витрат: +Example cost model: ``` # This statement captures the skip value, @@ -727,64 +727,64 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -Приклад розрахунку вартості запиту за наведеною вище моделлю: +Example query costing using the above model: -| Запит | Ціна | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Застосування вартісної моделі +#### Applying the cost model -Моделі витрат застосовуються за допомогою Indexer CLI, яка передає їх до Indexer Management API агента індексатора для зберігання в базі даних. Потім сервіс-індексатора забирає їх і надає моделі витрат шлюзам, коли вони їх запитують. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Взаємодія з мережею +## Interacting with the network -### Стейкінг в протоколі +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -Після того, як індексатор застейкав GRT токени у протоколі, [Indexer components](/indexing/overview/#indexer-components) можна запустити та почати взаємодію з мережею. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Approve токенів +#### Approve tokens -1. Відкрийте [програму Remix](https://remix.ethereum.org/) у браузері +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. У `File Explorer` створіть файл під назвою **GraphToken.abi** з [токен ABI](https://raw.githubusercontent.com/graphprotocol /contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. У розділі середовища виберіть `Injected Web3`, а в розділі `Account` виберіть свою адресу індексатора. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Установіть адресу контракту GraphToken. 
Вставте адресу контракту GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) поруч із полем `At Address` та натисніть кнопку `At Address`, щоб застосувати. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Виберіть функцію `approve(spender, amount)`, для схвалення транзакції про взаємодію зі стейкінг контрактом. Заповніть поле `spender` адресою стейкінг контракта (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) і поле `amount` кількістю токенів, які буду використані для стейка (у wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Стейкінг токенів +#### Stake tokens -1. Відкрийте [програму Remix](https://remix.ethereum.org/) у браузері +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. У `File Explorer` створіть файл під назвою **Staking.abi** зі стейкінгом ABI. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. У розділі середовища виберіть `Injected Web3`, а в розділі `Account` виберіть свою адресу індексатора. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Установіть адресу стейкінг контракта. Вставте цю адресу (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) поруч із полем `At Address` та натисніть кнопку `At Address`, щоб застосувати. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. Викличте `stake()`, щоб застейкати токени GRT у протоколі. +6. Call `stake()` to stake GRT in the protocol. -7. (Необов’язково) Індексатори можуть схвалити іншу адресу як оператора своєї інфраструктури індексатора, щоб відокремити ключі, які контролюють кошти, від тих, які виконують щоденні дії, такі як розподіл на підграфах і обслуговування (оплачених) запитів. Щоб встановити оператора, викликайте `setOperator()` з адресою оператора. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. -8. (Необов'язково) Для того, щоб контролювати розподіл винагород і стратегічно залучати делегатів, індексатори можуть оновлювати свої параметри делегування, змінюючи indexingRewardCut (частини на мільйон), queryFeeCut (частини на мільйон) і cooldownBlocks (кількість блоків). Для цього викличте `setDelegationParameters()`. У наступному прикладі queryFeeCut налаштовує на розподіл 95% комісії за запити для Індексатора та 5% для Делегатів, та встановлює indexingRewardCutto розподіляти 60% винагород за індексування для Індексатора та 40% для Делегатів, і встановлює `thecooldownBlocks` період до 500 блоків. +8. 
(Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

@@ -806,12 +806,12 @@ To set the delegation parameters using Graph Explorer interface, follow these st

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### Термін розподілу
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-Індексаторам рекомендується використовувати функцію offchain синхронізації для синхронізації розгортань підграфів з head of chain перед створенням розподілів у мережі. Ця функція особливо корисна для підграфів, синхронізація яких може зайняти понад 28 епох, або для яких існує ймовірність невизначеного збою.
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chance of failing non-deterministically.
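
As a supplement to the delegation-parameters step above: Indexers who prefer scripting the `setDelegationParameters()` call instead of clicking through Remix can use any transaction-signing tool. Below is a minimal sketch using Foundry's `cast` (an assumption — the tool choice, RPC URL, and key handling are placeholders; it assumes the function takes the three `uint32` values shown in the example above, and the address is the Staking contract referenced earlier):

```sh
# Sketch only: assumes Foundry's `cast` is installed and that ETH_RPC_URL and
# OPERATOR_KEY are set in the environment. Same arguments as the example above.
STAKING=0xF55041E37E12cD407ad00CE2910B8269B01263b9

cast send "$STAKING" \
  "setDelegationParameters(uint32,uint32,uint32)" \
  950000 600000 500 \
  --rpc-url "$ETH_RPC_URL" \
  --private-key "$OPERATOR_KEY"
```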
From b37d3c434517c1d8cd671ac2848fe5ad823019b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:50 -0500 Subject: [PATCH 0085/1534] New translations overview.mdx (Chinese Simplified) --- website/src/pages/zh/indexing/overview.mdx | 442 ++++++++++----------- 1 file changed, 221 insertions(+), 221 deletions(-) diff --git a/website/src/pages/zh/indexing/overview.mdx b/website/src/pages/zh/indexing/overview.mdx index c1a7e2cb4ac7..b64a6d6f265e 100644 --- a/website/src/pages/zh/indexing/overview.mdx +++ b/website/src/pages/zh/indexing/overview.mdx @@ -1,44 +1,45 @@ --- -title: 索引 +title: 索引概述 +sidebarTitle: 概述 --- -索引人是Graph 网络中的节点运营商,他们质押 Graph 通证 (GRT) 以提供索引和查询处理服务。 索引人通过他们的服务赚取查询费和索引奖励。 他们还根据 Cobbs-Douglas 回扣函数从回扣池中赚取收益,该回扣池与所有网络贡献者按他们的工作成比例共享。 +索引人是The Graph 网络中的节点运营商,他们质押 Graph代币(GRT) 以提供索引和查询处理服务。 索引人通过他们的服务赚取查询费和索引奖励。 他们还根据 Cobbs-Douglas 回扣函数从回扣池中赚取收益,该回扣池与所有网络贡献者按他们的工作成比例共享。 抵押在协议中的 GRT 会受到解冻期的影响,如果索引人是恶意的并向应用程序提供不正确的数据或索引不正确,则可能会被削减。 索引人也可以从委托人那里获得委托,为网络做出贡献。 索引人根据子图的策展信号选择要索引的子图,其中策展人质押 GRT 以指示哪些子图是高质量的并应优先考虑。 消费者(例如应用程序)还可以设置索引人处理其子图查询的参数,并设置查询费用定价的偏好。 -## 常见问题 +## FAQ -### 成为网络索引人所需的最低份额是多少? +### What is the minimum stake required to be an Indexer on the network? -索引人的最低抵押数量目前设置为 10万个 GRT。 +The minimum stake for an Indexer is currently set to 100K GRT. -### 索引人的收入来源是什么? +### What are the revenue streams for an Indexer? -**查询费返利** - 为网络上的查询服务支付的费用。这些支付通过索引人和网关之间的状态通道进行调解。来自网关的每个查询请求都包含一个支付和相应的响应,一个查询结果有效性的证明。 +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**索引奖励** - 通过 3% 的年度协议范围通货膨胀生成,索引奖励分配给为网络索引子图部署的索引器。 +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### 索引奖励如何分配? +### How are indexing rewards distributed? -索引奖励来自协议通胀,每年发行量设定为 3%。 它们根据每个子图上所有管理信号的比例分布在子图上,然后根据他们在该子图上分配的份额按比例分配给索引人。 **一项分配必须以符合仲裁章程规定的标准的有效索引证明(POI)来结束,才有资格获得奖励。** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### 什么是索引证明 (POI)? +### What is a proof of indexing (POI)? -网络中使用 POI 来验证索引人是否正在索引它们分配的子图。 在关闭该分配的分配时,必须提交当前时期第一个区块的 POI,才有资格获得索引奖励。 区块的 POI 是特定子图部署的所有实体存储交易的摘要,直到并包括该块。 +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. 
A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block.

-### 索引奖励什么时候分配?
+### When are indexing rewards distributed?

-当分配活动在28个时期内分配时,分配会不断累积奖励。奖励由索引人收集,并在分配结束时分发。 这可以手动发生,每当索引人想要强制关闭它们时,或者在 28 个时期后,委托人可以关闭索引人的分配,但这会导致没有奖励。28 个时期是最大分配生命周期(现在,一个 时期持续约 24 小时)。
+Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h).

-### 可以监控待处理的索引人奖励吗?
+### Can pending indexing rewards be monitored?

The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation.

-许多社区制作的仪表板包含悬而未决的奖励值,通过以下步骤可以很容易地手动检查这些值:
+Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps:

1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations:

```graphql
query indexerAllocations {
  indexer(id: "<INDEXER_ADDRESS>") {
    allocations {
      activeForIndexer {
        allocations {
          id
        }
      }
    }
  }
}
```

-使用 Etherscan 调用 `getRewards()`:
+Use Etherscan to call `getRewards()`:

-- 导航到[奖励合约的 Etherscan 界面](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)
-
-* 调用`getRewards()`:
+- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)
+- To call `getRewards()`:
  - Expand the **9. getRewards** dropdown.
-  - 在输入中输入**分配 ID**.
-  - 点击**查询**按钮.
+  - Enter the **allocationID** in the input.
+  - Click the **Query** button.

-### 争议是什么? 在哪里可以查看?
+### What are disputes and where can I view them?

-在争议期间,索引人的查询和分配都可以在Graph上进行争论。 争议期限因争议类型而异。 查询/证明有 7 个时期的争议窗口,而分配有 56 个时期。 在这些期限过后,不能对分配或查询提出争议。 当争议开始时,Fishermen需要至少 10000 GRT 的押金,押金将被锁定,直到争议结束并给出解决方案。 Fishermen是任何引发争议的网络参与者。
+Indexers' queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have a 7-epoch dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants that open disputes.

-争议有**三种**可能的结果,Fishermen的存款也是如此。
+Disputes have **three** possible outcomes, as does the deposit of the Fishermen.

-- 如果争议被驳回,Fishermen存入的 GRT 将被消耗,争议的索引人将不会被削减。
-- 如果以平局方式解决争议,Fishermen的押金将被退还,并且争议的索引人不会被削减。
-- 如果争议被接受,Fishermen存入的 GRT 将被退回,有争议的索引人将被削减,Fishermen将获得被削减的 GRT的50%。
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed.
+- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -争议可以在用户界面中的 `争议`标签下的索引人档案页中查看。 +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -### 什么是查询费返利? 何时分配? +### What are query fee rebates and when are they distributed? Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -一旦分配已结束且争议期已过,索引人就可以要求回扣。 查询费用回扣根据查询费用减免和委托池比例分配给索引人及其委托人。 +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### 什么是查询费减免和索引奖励减免? +### What is query fee cut and indexing reward cut? -`queryFeeCut` 和 `indexingRewardCut` 值是委托的参数,该索引可以设置连同 cooldownBlocks 控制 GRT 的索引和他们的委托人之间的分配。 有关设置委托参数的说明,请参阅[协议中的质押](/indexing/overview/#stake-in-the-protocol)的最后步骤。 +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **查询费用削减** - 将分配给索引人的子图上累积的查询费用回扣的百分比。 如果将其设置为 95%,则在申请分配时,索引人将获得查询费用回扣池的 95%,另外 5% 将分配给委托人。 +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **索引奖励削减** - 分配给索引人的子图上累积的索引奖励的百分比。 如果将其设置为 95%,则当分配结束时,索引人将获得索引奖励池的 95%,而委托人将分配其他 5%。 +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### 索引人如何知道要索引哪些子图? +### How do Indexers know which subgraphs to index? -索引人可以通过应用高级技术来进行子图索引决策,从而使自己与众不同,但为了给出一个大致的概念,我们将讨论几个用于评估网络中子图的关键指标: +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -- **策展信号** - 应用于特定子图的网络策展信号的比例是对该子图兴趣的一个很好的指标,尤其是在引导阶段,当查询量不断上升时。 +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **收取的查询费** - 特定子图收取的查询费的历史数据是未来需求的良好指标。 +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. 

-- **质押量** - 监控其他索引人的行为或查看分配给特定子图的总质押量的比例,可以让索引人监控子图查询的供应方,以确定网络显示出信心的子图或可能显示出需要更多供应的子图。
+- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply.

-- **没有索引奖励的子图** - 一些子图不会产生索引奖励,主要是因为它们使用了不受支持的功能,如 IPFS,或者因为它们正在查询主网之外的另一个网络。 如果子图未生成索引奖励,您将在子图上看到一条消息。
+- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards.

-### 对硬件有什么要求?
+### What are the hardware requirements?

-- **小型** - 足以开始索引几个子图,可能需要扩展。
-- **标准** - 默认设置,这是在 k8s/terraform 部署清单示例中使用的。
-- **中型** - 生产型索引人支持 100 个子图和每秒 200-500 个请求。
-- **大型** -准备对当前使用的所有子图进行索引,并为相关流量的请求提供服务。
+- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded.
+- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests.
+- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second.
+- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic.

-| 设置 | (CPU 数量) | (内存 GB) | (硬盘 TB) | (CPU 数量) | (内存 GB) |
-| ---- | :--------: | :-------: | :-------: | :--------: | :-------: |
-| 小型 | 4 | 8 | 1 | 4 | 16 |
-| 标准 | 8 | 30 | 1 | 12 | 48 |
-| 中型 | 16 | 64 | 2 | 32 | 64 |
-| 大型 | 72 | 468 | 3.5 | 48 | 184 |
+| Setup | Postgres<br />(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs<br />(memory in GBs) |
+| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: |
+| Small | 4 | 8 | 1 | 4 | 16 |
+| Standard | 8 | 30 | 1 | 12 | 48 |
+| Medium | 16 | 64 | 2 | 32 | 64 |
+| Large | 72 | 468 | 3.5 | 48 | 184 |

-### 索引人应该采取哪些基本的安全防范措施?
+### What are some basic security precautions an Indexer should take?

-- **运营商钱包** - 设置运营商钱包是一项重要的预防措施,因为它允许索引器在控制股权的密钥和控制日常交易的密钥之间保持分离-天操作。有关说明,请参阅[Stake in Protocol](/indexing/overview/#stake-in-the-protocol)。
+- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions.

-- **防火墙** - 只有索引人服务需要公开,尤其要注意锁定管理端口和数据库访问:Graph 节点 JSON-RPC 端点(默认端口:8030)、索引人管理 API 端点(默认端口:18000)和 Postgres 数据库端点(默认端口:5432)不应暴露。
+- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed.

-## 基础设施
+## Infrastructure

-索引人基础设施的中心是Graph节点,监控索引网络,根据子图定义提取和加载数据,并将其作为[GraphQL API](/about/#how-the-graph-works)提供。Graph节点需要连接到一个端点,该端点暴露来自每个索引网络的数据;用于源数据的IPFS节点;用于其存储的PostgreSQL数据库;以及促进其与网络交互的索引人组件。
+At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.

-- **PostgreSQL 数据库** - Graph节点的主要存储,这是存储子图数据的地方。 索引人服务和代理也使用数据库来存储状态通道数据、成本模型、索引规则以及分配操作。
+- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.

-- **数据端点**-对于兼容EVM的网络,Graph节点需要连接到一个公开兼容EVM JSON-RPC API的端点。这可以采取单个客户端的形式,也可以是跨多个客户端进行负载平衡的更复杂的设置。需要注意的是,某些子图将需要特定的客户端功能,如存档模式和/或奇偶校验跟踪API。
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **IPFS 节点(版本小于 5)** - 子图部署元数据存储在 IPFS 网络上。 Graph节点在子图部署期间主要访问 IPFS 节点,以获取子图清单和所有链接文件。 网络索引人不需要托管自己的 IPFS 节点,网络的 IPFS 节点是托管在https://ipfs.network.thegraph.com。
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.
-- **索引人服务** -处理所有网络必要的外部通信。 共享成本模型和索引状态,将来自网关的查询请求传递给一个Graph节点,并通过状态通道与网关管理查询支付。 +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **索引人代理** - 促进索引人在链上的交互,包括在网络上注册,管理子图部署到其Graph节点,以及管理分配。 +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **Prometheus 指标服务器** - Graph节点 和索引人组件将其指标记录到指标服务器。 +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -注意:为了支持敏捷扩展,建议在不同的节点集之间分开查询和索引问题:查询节点和索引节点。 +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -### 端口概述 +### Ports overview -> **重要**: 公开暴露端口时要小心 - **管理端口** 应保持锁定。 这包括下面详述的Graph节点 JSON-RPC 和索引人管理端点。 +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Graph 节点 -| 端口 | 用途 | 路径 | CLI 参数 | 环境 变量 | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP 服务
(用于子图查询) | /subgraphs/id/...<br /> <br />/subgraphs/name/.../... | --http-port | - |
-| 8001 | GraphQL WS<br />(用于子图订阅) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
-| 8020 | JSON-RPC<br />(用于管理部署) | / | --admin-port | - |
-| 8030 | 子图索引状态 API | /graphql | --index-node-port | - |
-| 8040 | Prometheus 指标 | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC<br />(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

-#### 索引人服务
+#### Indexer Service

-| 端口 | 用途 | 路径 | CLI 参数 | 环境 变量 |
-| --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP 服务器<br />(用于付费子图查询) | /subgraphs/id/...<br />/status<br />/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus 指标 | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------------------------------------- | ------------------------------------------------------------- | --------------- | ---------------------- |
+| 7600 | GraphQL HTTP server<br />(for paid subgraph queries) | /subgraphs/id/...<br />/status<br />
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### 索引人代理 +#### Indexer Agent -| 端口 | 用途 | 路径 | CLI 参数 | 环境 变量 | -| ---- | -------------- | ---- | ------------------------- | --------------------------------------- | -| 8000 | 索引人管理 API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### 在谷歌云上使用 Terraform 建立服务器基础设施 +### Setup server infrastructure using Terraform on Google Cloud -> 注意:索引人可以选择使用AWS,Microsoft Azure, or Alibaba。 +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### 安装先决条件 +#### Install prerequisites -- 谷歌云 SDK -- Kubectl 命令行工具 +- Google Cloud SDK +- Kubectl command line tool - Terraform -#### 创建一个谷歌云项目 +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Query fees are collected by the gateway and distributed to indexers according to cd terraform ``` -- 通过谷歌云认证并创建一个新项目。 +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- 使用 Google Cloud Console 的计费页面为新项目启用计费。 +- Use the Google Cloud Console's billing page to enable billing for the new project. -- 创建谷歌云配置。 +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- 启用所需的 Google Cloud API。 +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- 创建一个服务账户。 +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- 启用将在下一步中创建的数据库和 Kubernetes 集群之间的对等连接。 +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,22 +249,22 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- 创建最小的 terraform 配置文件(根据需要更新)。 +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < **注意**: 所有的运行时配置变量可以在启动时作为参数应用到命令中,也可以使用格式为 `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`) 的环境变量。 +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). 
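
To make the flag/variable equivalence concrete, here is a hedged sketch (the endpoint value and elided arguments are placeholders; the `--ethereum` flag name is inferred from the `INDEXER_AGENT_ETHEREUM` example in the note above):

```sh
# Two equivalent ways to pass the same setting to the Indexer agent, per the
# COMPONENT_NAME_VARIABLE_NAME convention described in the note above.

# 1) As a startup parameter (remaining required arguments elided):
graph-indexer-agent start --ethereum mainnet:http://localhost:8545 # ...

# 2) As an environment variable picked up on startup:
export INDEXER_AGENT_ETHEREUM=mainnet:http://localhost:8545
graph-indexer-agent start # ...
```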
-#### 索引代理 +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### 索引人服务 +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -514,58 +514,58 @@ graph-indexer-service start \ | pino-pretty ``` -#### 索引人 CLI +#### Indexer CLI -Indexer CLI 是一个可以在终端访问`graph indexer`的插件,地址是[`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli)。 +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### 使用Indexer CLI 管理索引人 +#### Indexer management using Indexer CLI -与**Indexer Management API**交互的建议工具是 **IndexerCLI**,它是 **GraphCLI** 的扩展。Indexer 代理需要来自 Indexer 的输入,以便代表 Indexer 与网络进行自主交互。定义 Indexer 代理行为的机制是**分配管理**模式和**索引规则**。在自动模式下,Indexer 可以使用**索引规则**应用它们的特定策略来选择子图以索引并查询。规则通过代理提供的 GraphQLAPI 进行管理,称为 Indexer Management API。在手动模式下,索引人可以使用**操作队列**创建分配操作,并在操作队列执行之前显式批准它们。在监督模式下,**索引规则**用于填充**操作队列**,并且还需要执行的显式批准。 +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### 使用方法 +#### Usage -**Indexer CLI**连接到索引人代理,通常是通过端口转发,因此 CLI 不需要运行在同一服务器或集群上。 为了帮助你入门,并提供一些背景,这里将简要介绍 CLI。 +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `graph indexer connect ` - 连接到索引人管理 API。 通常情况下,与服务器的连接是通过端口转发打开的,所以 CLI 可以很容易地进行远程操作。 (例如: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` -获取一个或多个索引规则,使用 `all` 作为`` 来获取所有规则,或使用 `global` 来获取全局默认规则。 可以使用额外的参数 `--merged` 来指定将特定部署规则与全局规则合并。 这就是它们在索引人代理中的应用方式。 +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` -设置一个或多个索引规则。 +- `graph indexer rules set [options] ...` - Set one or more indexing rules. 
-- `graph indexer rules start [options] ` - 开始索引子图部署(如果可用),并将其`decisionBasis`设置为`always`, 这样索引人代理将始终选择对其进行索引。 如果全局规则被设置为总是,那么网络上所有可用的子图都将被索引。 +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` -停止对某个部署进行索引,并将其 `decisionBasis`设置为 never, 这样它在决定要索引的部署时就会跳过这个部署。 +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` —将部署的 `thedecisionBasis`设置为`规则`, 这样索引人代理将使用索引规则来决定是否对这个部署进行索引。 +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. -- `graph indexer action queue allocate ` -队列分配操作 +- `graph indexer action queue allocate ` - Queue allocation action -- `graph indexer action queue reallocate ` -队列重新分配操作 +- `graph indexer action queue reallocate ` - Queue reallocate action -- `graph indexer action queue unallocate ` - 队列未分配操作 +- `graph indexer action queue unallocate ` - Queue unallocate action -- `graph indexer actions cancel [ ...]` - 如果未指定 id,则取消队列中的所有操作,否则取消以空格作为分隔符的 id 数组 +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator -- `graph indexer actions approve [ ...]` - 批准执行多个操作 +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution -- `graph indexer actions execute approve` - 强迫工人立即执行批准的行动 +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -所有在输出中显示规则的命令都可以使用 `-output`参数在支持的输出格式(`table`, `yaml`, and `json`)之间进行选择。 +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### 索引规则 +#### Indexing rules -索引规则可以作为全局默认值应用,也可以使用它们的 ID 应用于特定的子图部署。`部署`和 `decisionBase` 字段是强制性的,而所有其他字段都是可选的。当索引规则具有`规则`作为 `decisionBase` 时,索引人代理将比较该规则上的非空阈值与从网络获取的用于相应部署的值。如果子图部署的值高于(或低于) 任何阈值,则将选择它进行索引。 +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -例如,如果全局规则的`minStake` 值为**5** (GRT), 则分配给它的份额超过 5 (GRT) 的任何子图部署都将被编入索引。 阈值规则包括`maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, 和 `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
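
As an illustration of the global `minStake` example above, here is a sketch using the rules commands already described (the key/value syntax follows those commands; the values are illustrative only):

```sh
# Require more than 5 GRT of allocated stake before indexing, network-wide,
# and let the agent decide based on indexing rules.
graph indexer rules set global minStake 5 decisionBasis rules

# Verify the resulting global rule.
graph indexer rules get global
```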
-数据模型: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -索引规则用法示例: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -611,20 +611,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### 操作队列CLI +#### Actions queue CLI -Indexer-cli 提供了一个 `actions` 模块,用于手动处理操作队列。它使用由索引器管理服务器托管的 **Graphql API** 与操作队列进行交互。 +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -如果`ActionStatus=已批准`,则操作执行工作人员将仅从队列中获取要执行的项目。在推荐的路径中,操作被添加到ActionStatus=queued的队列中,因此必须经过批准才能在链上执行。一般流程如下: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- 第三方优化器工具或indexer-cli用户添加到队列的操作 -- 索引人可以使用`indexer-cli`查看所有排队的操作 -- 索引人(或其他软件)可以使用`indexer-cli`批准或取消队列中的操作。批准和取消命令将一组操作ID作为输入。 -- 执行工作人员定期轮询队列以获得批准的操作。它将从队列中获取`已批准`的操作,尝试执行它们,并根据执行状态将数据库中的值更新为`成功`或`失败`。 -- 如果操作成功,工作人员将确保存在索引规则,告诉代理如何管理向前的分配,这在代理处于`自动`或`监督`模式时进行手动操作非常有用。 -- 索引人可以监视操作队列以查看操作执行的历史记录,如果需要,可以在操作项执行失败时重新批准和更新操作项。操作队列提供排队和执行的所有操作的历史记录。 +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
-数据模型: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -数据源用法示例 +Example usage from source: ```bash graph indexer actions get all @@ -677,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -请注意,分配管理支持的操作类型有不同的输入要求: +Note that supported action types for allocation management have different input requirements: -- `Allocate` - 将份额分配给特定的子图部署 +- `Allocate` - allocate stake to a specific subgraph deployment - - 所需的操作参数: - - 部署ID - - 数量 + - required action params: + - deploymentID + - amount -- `Unallocate` - 结束分配,腾出份额重新分配到其他地方 +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - 所需的操作参数: - - 分配ID - - 部署ID - - 可选操作参数: + - required action params: + - allocationID + - deploymentID + - optional action params: - poi - - force(使用提供的POI的力量,即使它与图形节点提供的不匹配) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - 自动关闭分配并为相同的子图部署打开新的分配 +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - 所需的操作参数: - - 分配ID - - 部署ID - - 数量 - - 可选操作参数: + - required action params: + - allocationID + - deploymentID + - amount + - optional action params: - poi - - force(使用提供的POI的力量,即使它与图形节点提供的不匹配) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### 成本模式 +#### Cost models -成本模型根据市场和查询属性为查询提供动态定价。索引人服务与网关共享一个成本模型,用于它们打算响应查询的每个子图。反过来,网关使用成本模型对每个查询进行索引人选择决策,并与选定的索引人协商付款。 +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Agora 语言提供了一种灵活的格式来声明查询的成本模型。 Agora 价格模型是一系列的语句,它们按照 GraphQL 查询中每个顶层查询的顺序执行。 对于每个顶层查询,第一个与其匹配的语句决定了该查询的价格。 +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -语句由一个用于匹配 GraphQL 查询的谓词和一个成本表达式组成,该表达式在评估时输出一个以十进制 GRT 表示的成本。 查询的命名参数位置中的值可以在谓词中捕获并在表达式中使用。 也可以在表达式中设置全局,并代替占位符。 +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. 
-成本模型示例: +Example cost model: ``` # This statement captures the skip value, @@ -727,64 +727,64 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -使用上述模型的查询成本计算示例: +Example query costing using the above model: -| 询问 | 价格 | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### 应用成本模式 +#### Applying the cost model -成本模型是通过索引人 CLI 应用的,CLI 将它们传递给索引人代理的索引人管理 API,以便存储在数据库中。 然后,索引人服务将接收这些模型,并在网关要求时将成本模型提供给它们。 +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## 与网络的交互 +## Interacting with the network -### 在协议中进行质押 +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -一旦索引人将GRT置于协议中,[索引人组件](/indexing/overview/#indexer-components)就可以启动并开始与网络交互。 +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### 批准代币 +#### Approve tokens -1. 在浏览器中打开[Remix app](https://remix.ethereum.org/) 。 +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. 使用[token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json).在`File Explorer`文件夹中创建一个名为**GraphToken.abi**的文件。 +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. 在环境选择`Injected Web3`和`Account` 下面选择你的索引人地址。 +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. 设置 GraphToken 合约地址 - 将 GraphToken 合约地址(`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) 粘贴到`At Address` 旁边 ,单击并应用`At address` 按钮。 +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. 调用`approve(spender, amount)`函数以批准 Staking 合约。 用质押合约地址(`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) 填写`spender` 和`amount` 要质押的代币数量 (in wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### 质押代币 +#### Stake tokens -1. 在浏览器中打开[Remix app](https://remix.ethereum.org/) 。 +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. 
在 `File Explorer` 创建一个名为**Staking.abi** 的文件中,使用 staking ABI.
+2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI.

3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface.

-4. 在环境选择`Injected Web3`和`Account` 下面选择你的索引人地址。
+4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

-5. 设置 GraphToken 合约地址 - 将 GraphToken 地址(`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) 粘贴到`At Address` 旁边 ,单击`At address` 按钮以应用。
+5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply.

-6. 调用 `stake()` 质押协议中的 GRT。
+6. Call `stake()` to stake GRT in the protocol.

-7. (可选)索引人可以批准另一个地址作为其索引人基础设施的操作员,以便将控制资金的密钥与执行日常操作,例如在子图上分配和服务(付费)查询的密钥分开。 用操作员地址调用`setOperator()` 设置操作员。
+7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address.

-8. (可选)为了控制奖励的分配和战略性地吸引委托人,索引人可以通过更新他们的索引人奖励削减(百万分之一)、查询费用削减(百万分之一)和冷却周期区块(区块数)来更新他们的委托参数。 要实现这一目的需要调用 `setDelegationParameters()`。 以下示例设置查询费用削减将 95% 的查询返利分配给索引人,5% 给委托人,设置索引人奖励削减将 60% 的索引奖励分配给索引人,将 40% 分配给委托人,并将`冷却周期区块`设置为 500 个。
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

@@ -806,12 +806,12 @@ To set the delegation parameters using Graph Explorer interface, follow these st

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### 分配的生命周期
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -建议索引人在链上创建分配之前,利用链外同步功能将子图部署同步到链头。对于可能需要超过28个时期才能同步或有一些无法确定失败的机会的子图,此功能特别有用。 +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From 3c52254aa1f8a64b5de9991614c8b54a62d4dda4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:52 -0500 Subject: [PATCH 0086/1534] New translations overview.mdx (Urdu (Pakistan)) --- website/src/pages/ur/indexing/overview.mdx | 418 ++++++++++----------- 1 file changed, 209 insertions(+), 209 deletions(-) diff --git a/website/src/pages/ur/indexing/overview.mdx b/website/src/pages/ur/indexing/overview.mdx index 20e9e3c120f4..19fdcc4056d1 100644 --- a/website/src/pages/ur/indexing/overview.mdx +++ b/website/src/pages/ur/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: انڈیکسنگ +title: Indexing Overview +sidebarTitle: جائزہ --- انڈیکسرز گراف نیٹ ورک میں نوڈ آپریٹرز ہیں جو انڈیکسنگ اور کیوری پراسیسنگ کی خدمات فراہم کرنے کے لیے گراف ٹوکنز (GRT) کو داؤ پر لگاتے ہیں۔ انڈیکسرز اپنی خدمات کے لیے کیوری فیس اور انڈیکسنگ کے انعامات حاصل کرتے ہیں۔ وہ کیوری فیس بھی کماتے ہیں جو ایک کفایتی چھوٹ کی تقریب کے مطابق چھوٹ دی جاتی ہیں. @@ -8,37 +9,37 @@ title: انڈیکسنگ انڈیکسرز سب گراف کے کیوریشن سگنل کی بنیاد پر انڈیکس کرنے کے لیے سب گرافس کا انتخاب کرتے ہیں, جہاں کیوریٹرز GRT کو سٹیک کرتے ہیں تاکہ یہ ظاہر کیا جا سکے کہ کون سے سب گرافس اعلیٰ معیار کے ہیں اور انہیں ترجیح دی جانی چاہیے. صارفین (مثلاً ایپلی کیشنز) ایسے عوامل کا تعین کر سکتے ہیں جن کے لیے انڈیکسرز اپنے سب گرافس کے لیے کیوریز پر کارروائی کرتے ہیں اور کیوری کی فیس کی قیمتوں کے لیے ترجیحات طے کرتے ہیں. -## عمومی سوالات +## FAQ -### نیٹ ورک پر انڈیکسر بننے کے لیے کم از کم کتنا سٹیک درکار ہے? +### What is the minimum stake required to be an Indexer on the network? -انڈیکسر کے لیے کم از کم سٹیک فی الحال 100 ہزار GRT پر سیٹ ہے. +The minimum stake for an Indexer is currently set to 100K GRT. -### انڈیکسر کے لیے آمدنی کے سلسلے کیا ہیں؟ +### What are the revenue streams for an Indexer? -**کیوری کی فیس ری بیٹ** - نیٹ ورک پر کیوریز پیش کرنے کے لیے ادائیگیاں. یہ ادائیگیاں ریاستی چینلز کے ذریعے انڈیکسر اور گیٹ وے کے درمیان ثالثی کی جاتی ہیں. گیٹ وے سے ہر کیوری کی درخواست میں ایک ادائیگی اور متعلقہ جواب کیوری کے نتیجہ کی درستگی کا ثبوت ہے. +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**انڈیکسنگ کے انعامات** - سالانہ %3 پروٹوکول کے وسیع افراط زر کے ذریعے تیار کیا گیا, انڈیکسنگ کے انعامات ان انڈیکسرز میں تقسیم کیے جاتے ہیں جو نیٹ ورک کے لیے سب گراف کی تعیناتیوں کو ترتیب دے رہے ہیں. 
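
Read together with the distribution rules described in the next section, the split of this issuance can be summarized by the following sketch (a simplified reading of that description, not the exact onchain accounting):

```latex
R_{\text{indexer}} = R_{\text{total}}
  \cdot \frac{\text{curation signal on the subgraph}}{\text{total curation signal}}
  \cdot \frac{\text{Indexer's allocated stake on the subgraph}}{\text{total stake allocated to the subgraph}}
```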
+**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### انڈیکسنگ کے انعامات کیسے تقسیم کیے جاتے ہیں? +### How are indexing rewards distributed? -انڈیکسنگ کے انعامات پروٹوکول کے افراط زر سے آتے ہیں جو کہ %3 سالانہ جاری کرنے پر مقر ر ہے. وہ ہر ایک پر تمام کیوریشن سگنل کے تناسب کی بنیاد پر سب گرافس میں تقسیم کیے جاتے ہیں, پھر اس سب گراف پر ان کے مختص سٹیک کی بنیاد پر انڈیکسرز کو متناسب طور پر تقسیم کیے جاتے ہیں. **ایک مختص کرنے کو انڈیکسنگ کے درست ثبوت (POI) کے ساتھ مختص کرنا ضروری ہے جو ثالثی چارٹر کے ذریعہ مقرر کردہ معیارات پر پورا اترتا ہے تاکہ انعامات کا اہل ہو.** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### انڈئکسنگ کا ثبوت (POI) کیا ہے؟ +### What is a proof of indexing (POI)? -POIs کو نیٹ ورک میں اس بات کی تصدیق کرنے کے لیے استعمال کیا جاتا ہے کہ ایک انڈیکسر ان سب گرافس کو انڈیکس کر رہا ہے جو انہوں نے مختص کیے ہیں. موجودہ ایپوک کے پہلے بلاک کے لیے ایک POI جمع کرانا ضروری ہے جب اس ایلوکیشن کے لیے ایلوکیشن کو بند کرتے ہوئے تاکہ انڈیکسنگ کے انعامات کے لیے اہل ہو سکے. ایک بلاک کے لیے POI تمام ہستیاں سٹور کی ٹرانزیکشنز کے لیے ایک ڈائجسٹ ہوتا ہے جس میں اس بلاک تک اور اس سمیت کسی مخصوص سب گراف کی تعیناتی ہوتی ہے. +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### انڈیکسنگ کے انعامات کب تقسیم کیے جاتے ہیں؟ +### When are indexing rewards distributed? -ایلوکیشنز مسلسل انعامات حاصل کر رہی ہوتی ہیں جب کہ وہ فعال ہیں اور 28 ایپوکس میں مختص کر دی جاتی ہیں. انعامات اینڈیکسرز کے ذریعے جمع کیے جاتے ہیں، اور ایلوکیشن کے بند ہوتے ہی تقسیم کر دیے جاتے ہیں. یہ یا تو دستی طور پر ہوتا ہے، جب بھی انڈیکسر انہیں زبردستی بند کرنا چاہتا ہے، یا 28 ایپوکس کے بعد ایک ڈیلیگیٹر انڈیکسر کے لیے ایلوکیشن کو بند کر سکتا ہے، لیکن اس کے نتیجے میں کوئی انعام نہیں ہوتا. 28 ایپوکس زیادہ سے زیادہ ایلوکیشن کا دورانیہ ہے (ابھی، ایک ایپوک تقریبا ~ 24 گھنٹے تک رہتا ہے). +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 
28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h).

-### کیا زیر غور انڈیکسنگ کے انعامات کی نگرانی کی جا سکتی ہے؟
+### Can pending indexing rewards be monitored?

The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation.

-کمیونٹی کے بنائے ہوئے بہت سے ڈیش بورڈز میں زیر التواء انعامات کی قدریں شامل ہیں اور ان اقدامات پر عمل کر کے انہیں آسانی سے دستی طور پر چیک کیا جا سکتا ہے:
+Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps:

1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations:

```graphql
query indexerAllocations {
  indexer(id: "<INDEXER_ADDRESS>") {
    allocations {
      activeForIndexer {
        allocations {
          id
        }
      }
    }
  }
}
```

-`getRewards()` کو کال کرنے کے لیے ایتھر سکین استعمال کریں:
+Use Etherscan to call `getRewards()`:

-- [انعامات کے کنٹریکٹ پر ایتھرسکین انٹرفیس](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) پر جائیں
-
-* `getRewards()` کو کال کرنے کے لیے:
+- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)
+- To call `getRewards()`:
  - Expand the **9. getRewards** dropdown.
-  - ان پٹ میں **AllocationID** درج کریں.
-  - **کیوری** بٹن پر کلک کریں.
+  - Enter the **allocationID** in the input.
+  - Click the **Query** button.

-### تنازعات کیا ہیں اور میں انہیں کہاں دیکھ سکتا ہوں?
+### What are disputes and where can I view them?

-انڈیکسر کی کیوریز اور ایکوکیشنز دونوں تنازعات کی مدت کے دوران گراف پر متنازعہ ہوسکتے ہیں. تنازعہ کی نوعیت کے لحاظ سے تنازعہ کی مدت مختلف ہوتی ہے. کیوریز/attestations میں تنازعات کے 7 epochs ہوتے ہیں، جبکہ ایلوکیشنز میں 56 epochs ہوتے ہیں. یہ مدت گزر جانے کے بعد، ایلوکیشنز یا کیوریز میں سے کسی کے خلاف تنازعات نہیں کھولے جا سکتے. جب کوئی تنازعہ کھولا جاتا ہے تو، Fishermen کو کم از کم 10,000 GRT جمع کرنا ضروری ہے، جو تنازعہ کے حتمی ہونے اور حل ہونے تک locked رہیں گے. Fishermen کسی بھی نیٹ ورک کے شرکاء ہیں جو تنازعات کو کھولتے ہیں.
+Indexers' queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have a 7-epoch dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants that open disputes.

-تنازعات کے **تین** ممکنہ نتائج ہوتے ہیں, اسی طرح Fishermen کی جمع کردہ میں.
+Disputes have **three** possible outcomes, as does the deposit of the Fishermen.

-- اگر تنازعہ مسترد کر دیا جاتا ہے, تو Fishermen کی طرف سے جمع کردہ GRT کو جلا دیا جائے گا، اور متنازعہ انڈیکسر کی کٹوتی نہیں کی جائے گی.
-- اگر تنازعہ قرعہ اندازی کے طور پر طے پا جاتا ہے، تو Fishermen کی جمع رقم واپس کر دی جائے گی، اور متنازعہ انڈیکسر کی کٹوتی نہیں کی جائے گی.
-- اگر تنازعہ قبول کر لیا جاتا ہے، تو Fishermen کی طرف سے جمع کرائی گئی GRT واپس کر دی جائے گی، متنازعہ انڈیکسر کی کٹوتی کر لی جائے گی اور Fishermen کٹوتی کے GRT کا 50% کمائیں گے.
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -تنازعات کو `Disputes` ٹیب کے نیچے انڈیکسر کے پروفائل پیج کی UI میں دیکھا جا سکتا ہے. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -### کیوری فیس ری بیٹس کیا ہیں اور وہ کب تقسیم کی جاتی ہیں? +### What are query fee rebates and when are they distributed? -کیوری فیس گیٹ وے کے ذریعے جمع کی جاتی ہیں اور انڈیکسرز میں ایکسپونینشل ریبیٹ فنکشن کے مطابق تقسیم کی جاتی ہیں (جی آئی پی [یہاں](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162) دیکھیں)۔ ایکسپونینشل ریبیٹ فنکشن اس بات کو یقینی بنانے کے طریقے کے طور پر تجویز کیا گیا ہے کہ انڈیکسرز ایمانداری کے ساتھ کیوریز کو پیش کرتے ہوئے بہترین نتائج حاصل کریں۔ یہ انڈیکسرز کو حصص کی ایک بڑی رقم مختص کرنے کی ترغیب دے کر کام کرتا ہے (جسے کیوری کرتے وقت غلطی کی وجہ سے کم کیا جا سکتا ہے) کیوری فیس کی مقدار کے مطابق جو وہ جمع کر سکتے ہیں. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -ایک بار مختص کرنے کے بعد ریبیٹ انڈیکسر کے ذریعہ دعوی کرنے کے لئے دستیاب ہے۔ دعویٰ کرنے پر،کیوری فیس کی چھوٹ انڈیکسر اور ان کے ڈیلیگیٹرز کو کیوری فیس میں کٹوتی اور ایکسپونینشل ریبیٹ فنکشن کی بنیاد پر تقسیم کی جاتی ہے. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### کیوری فی کٹ اور انڈیکسنگ ریوارڈ کٹ کیا ہے؟ +### What is query fee cut and indexing reward cut? -`queryFeeCut` اور `indexingRewardCut` قدریں ڈیلی گیشن پیرامیٹر ہیں جنہیں انڈیکسر, انڈیکسر اور ان کے ڈیلیگیٹرز کے درمیان GRT کی تقسیم کو کنٹرول کرنے کے لیے cooldownBlocks کے ساتھ سیٹ کر سکتا ہے. ڈیلیگیشن کے پیرامیٹرز کو ترتیب دینے کے لیے ہدایات کے لیے [پروٹوکول میں حصہ لینا](/indexing/overview/#stake-in-the-protocol) میں آخری مراحل دیکھیں. +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **کیوری فیس کٹ** - کیوری فیس کی چھوٹ کا % جو انڈیکسر میں تقسیم کیا جائے گا۔ اگر اسے 95% پر سیٹ کیا جاتا ہے، تو انڈیکسر کو 95% کیوری فیس موصول ہو گی جب ایک مختص بند ہو جائے گا اور باقی 5% ڈیلیگیٹرز کو جائے گا. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. 
-- **انڈیکسنگ ریوارڈ کٹ** - انڈیکسنگ کے انعامات کا % جو انڈیکسر میں تقسیم کیے جائیں گے۔ اگر اسے 95% پر سیٹ کیا جاتا ہے تو، مختص کرنے کے بند ہونے پر انڈیکس کرنے والے کو انڈیکسنگ انعامات کا 95% ملے گا اور ڈیلیگیٹرز باقی 5% کو تقسیم کر دیں گے.
+- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed, and the Delegators will split the other 5%.

-### انڈیکسر کیسے جانتے ہیں کہ کون سے سب گرافس کو انڈیکس کرنا ہے؟
+### How do Indexers know which subgraphs to index?

-انڈیکسرز سب گراف انڈیکسنگ کے فیصلے کرنے کے لیے جدید تکنیکوں کو استعمال کر کے خود کو نمایاں کر سکتے ہیں لیکن ایک عمومی خیال دینے کے لیے ہم نیٹ ورک میں سب گراف کی جانچ کے لیے استعمال ہونے والی کئی کلیدی میٹرکس پر بات کریں گے:
+Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions, but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network:

-- **کیوریشن سگنل** - کسی خاص سب گراف پر لاگو نیٹ ورک کیوریشن سگنل کا تناسب اس سب گراف میں دلچسپی کا ایک اچھا اشارہ ہے, خاص طور پر بوٹسٹریپ مرحلے کے دوران جب کیوری کا حجم بڑھ رہا ہوتا ہے.
+- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query volume is ramping up.

-- **جمع کی گئی کیوری فیس** - ایک مخصوص سب گراف کے لیے جمع کردہ کیوری فیس کی مقدار کا تاریخی ڈیٹا مستقبل کی طلب کا ایک اچھا اشارہ ہے.
+- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand.

-- **سٹیک پر لگی رقم** - دوسرے انڈیکسرز کے رویے کی نگرانی کرنا یا مخصوص سب گراف کے لیے مختص کل حصص کے تناسب کو دیکھنا انڈیکسر کو سب گراف کی کیوریز کے لیے سپلائی سائیڈ کی نگرانی کرنے کی اجازت دے سکتا ہے تاکہ ان سب گرافوں کی شناخت کی جا سکے جن پر نیٹ ورک اعتماد ظاہر کر رہا ہے یا ان سب گرافس جو مزید سپلائی کی ضرورت کو ظاہر کر سکتے ہیں.
+- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply.

-- **انڈیکسنگ انعامات کے بغیر سب گراف** - کچھ سب گراف بنیادی طور پر اس وجہ سے انڈیکسنگ کے انعامات نہیں بناتے کہ وہ IPFS جیسی غیر تعاون یافتہ خصوصیات استعمال کر رہے ہیں یا اس وجہ سے کہ وہ مین نیٹ سے باہر کسی دوسرے نیٹ ورک کو کیوری کر رہے ہیں. آپ کو سب گراف پر ایک پیغام نظر آئے گا اگر یہ انڈیکسنگ کے انعامات نہیں بنا رہا ہے.
+- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards.

-### ہارڈ ویئر کی ضروریات کیا ہیں؟
+### What are the hardware requirements?

-- **چھوٹا** - کئی سب گرافس کی انڈیکسنگ شروع کرنے کے لیے کافی ہے، ممکنہ طور پر توسیع کی ضرورت ہوگی.
-- **معیاری** - پہلے سے طے شدہ سیٹ اپ، یہ وہی ہے جو مثال کے k8s/terraform کے تعیناتی مینی فیسٹس میں استعمال ہوتا ہے.
-- **درمیانہ** - پروڈکشن انڈیکسر 100 سب گراف اور 200-500 درخواستیں فی سیکنڈ کو اٹھا سکتا ہے.
-- **بڑا** - تمام فی الحال زیر استعمال سب گرافس کو انڈیکس کرنے اور متعلقہ ٹریفک کے لیے درخواستیں پیش کرنے کے لیے تیار ہے.
+- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| سیٹ اپ | Postgres
(CPUs) | Postgres
(GBs میں میموری) | Postgres
(TBs میں ڈسک) | VMs
(CPUs) | VMs
(GBs میں میموری) | -| --- | :-: | :-: | :-: | :-: | :-: | -| چھوٹا | 4 | 8 | 1 | 4 | 16 | -| معیاری | 8 | 30 | 1 | 12 | 48 | -| درمیانہ | 16 | 64 | 2 | 32 | 64 | -| بڑا | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -### وہ کون سی چند بنیادی حفاظتی تدابیر ہیں جو ایک انڈیکسر کو اختیار کرنی چاہیے؟ +### What are some basic security precautions an Indexer should take? -- **آپریٹر والیٹ** - آپریٹر والیٹ کا سیٹ اپ کرنا ایک اہم احتیاط ہے کیونکہ یہ ایک انڈیکسر کو اپنی کلیدوں کے درمیان علیحدگی برقرار رکھنے کی اجازت دیتا ہے جو سٹیک کو کنٹرول کرتی ہیں اور جو روزمرہ کے کاموں کے کنٹرول میں ہیں. ہدایات کے لیے [پروٹوکول میں حصہ ڈالنا](/indexing/overview/#stake-in-the-protocol) دیکھیں. +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **فائر وال** - صرف انڈیکسر سروس کو عوامی طور پر ظاہر کی ضرورت ہے اور ایڈمن پورٹس اور ڈیٹا بیس تک رسائی کو لاک ڈاؤن کرنے پر خاص توجہ دی جانی چاہیے: گراف نوڈ JSON-RPC اینڈ پوائنٹ (ڈیفالٹ پورٹ: 8030)، انڈیکسر مینجمنٹ API endpoint (ڈیفالٹ پورٹ: 18000)، اور Postgres ڈیٹا بیس اینڈ پوائنٹ (ڈیفالٹ پورٹ: 5432) کو ظاہر نہیں کرنا چاہیے. +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## انفراسٹرکچر +## Infrastructure -انڈیکسر کے بنیادی ڈھانچے کے مرکز میں گراف نوڈ ہوتا ہے جو انڈیکسڈ نیٹ ورکس کی نگرانی کرتا ہے، سب گراف کی تعریف کے مطابق ڈیٹا کو نکالتا اور لوڈ کرتا ہے اور اسے [GraphQL API](/about/#how-the-graph-works) کے طور پر کام کرتا ہے۔ گراف نوڈ کو ہر انڈیکسڈ نیٹ ورک سے ڈیٹا کو ظاہر کرنے والے اینڈ پوائنٹ سے منسلک ہونے کی ضرورت ہے۔ ڈیٹا سورسنگ کے لیے ایک IPFS نوڈ؛ اس کے اسٹور کے لیے ایک PostgreSQL ڈیٹا بیس؛ اور انڈیکسر اجزاء جو نیٹ ورک کے ساتھ اس کے تعامل کو آسان بناتے ہیں. +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL ڈیٹا بیس** - گراف نوڈ کا مرکزی اسٹور، یہ وہ جگہ ہے جہاں سب گراف ڈیٹا محفوظ کیا جاتا ہے. انڈیکسر سروس اور ایجنٹ سٹیٹ چینل کے ڈیٹا، لاگت کے ماڈل، انڈیکسنگ کے قواعد، اور ایلوکیشن کارروائیوں کو ذخیرہ کرنے کے لیے بھی ڈیٹا بیس کا استعمال کرتے ہیں. +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. 
-- **ڈیٹا اینڈ پوائنٹ** - EVM-مطابقت پذیر نیٹ ورکس کے لیے، گراف نوڈ کو ایک ایسے اینڈ پوائنٹ سے منسلک کرنے کی ضرورت ہے جو EVM کے موافق JSON-RPC API کو ظاہر کرے۔ یہ ایک کلائنٹ کی شکل اختیار کر سکتا ہے یا یہ زیادہ پیچیدہ سیٹ اپ ہو سکتا ہے جو متعدد پر بیلنس کو لوڈ کرتا ہے۔ یہ جاننا ضروری ہے کہ کچھ سب گراف کے لیے مخصوص کلائنٹ کی صلاحیتوں کی ضرورت ہوگی جیسے کہ آرکائیو موڈ اور/یا پیریٹی ٹریسنگ API.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client, or it could be a more complex setup that load balances across multiple clients. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **IPFS نوڈ (ورزن 5 سے کم)** - سب گراف تعیناتی میٹا ڈیٹا IPFS نیٹ ورک پر محفوظ کیا جاتا ہے. گراف نوڈ بنیادی طور پر سب گراف کی تعیناتی کے دوران IPFS نوڈ تک رسائی حاصل کرتا ہے تاکہ سب گراف مینی فیسٹ اور تمام منسلک فائلوں کو حاصل کیا جا سکے. نیٹ ورک انڈیکسرز کو اپنے IPFS نوڈ کی میزبانی کرنے کی ضرورت نہیں ہے، نیٹ ورک کے لیے ایک IPFS نوڈ https://ipfs.network.thegraph.com پر ہوسٹ کیا گیا ہے.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node; an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

-- **انڈیکسر سروس** - نیٹ ورک کے ساتھ تمام مطلوبہ بیرونی مواصلات کو ہینڈل کرتا ہے۔ لاگت کے ماڈلز اور انڈیکسنگ کے حالات کا اشتراک کرتا ہے، گیٹ ویز سے کیوری کی درخواستوں کو گراف نوڈ پر منتقل کرتا ہے، اور گیٹ وے کے ساتھ سٹیٹ چینلز کے ذریعے کیوری کی ادائیگیوں کا انتظام کرتا ہے.
+- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **انڈیکسر ایجنٹ** - نیٹ ورک پر رجسٹریشن، اس کے گراف نوڈ/س میں سب گراف کی تعیناتیوں کا انتظام، اور ایلوکیشنز کا نظم کرنے سمیت چین پر انڈیکسرز کے تعاملات کی سہولت فراہم کرتا ہے.
+- **Indexer agent** - Facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.

-- **Prometheus میٹرکس سرور** - گراف نوڈ اور انڈیکسر کمپونینٹس اپنے میٹرکس کو میٹرکس سرور پر لاگ کرتے ہیں.
+- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

-نوٹ: فرتیلی پیمائی کو سپورٹ کرنے کے لیے، یہ تجویز کیا جاتا ہے کہ کیوری اور انڈیکسنگ کے معاملات کو نوڈس کے مختلف سیٹس کے درمیان الگ کیا جائے: کیوری نوڈس اور انڈیکس نوڈس.
+Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.

-### پورٹس کا جائزہ
+### Ports overview

-> **اہم بات**: پورٹس کو عوامی طور پر ظاہر کرنے کے بارے میں محتاط رہیں - **انتظامی پورٹس** کو لاک ڈاؤن رکھا جانا چاہیے. اس میں گراف نوڈ JSON-RPC اور نیچے دیے گئے انڈیکسر مینجمنٹ اینڈ پوائنٹس شامل ہیں.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.
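To make that guidance concrete, here is a minimal host-firewall sketch. It assumes `ufw` and the default port assignments from the tables below, and it assumes the Indexer service is the only component that should be publicly reachable; adapt it to your own network layout.

```sh
# A minimal sketch, assuming ufw and the default ports documented below.
sudo ufw default deny incoming   # close everything unless explicitly opened
sudo ufw allow 7600/tcp          # Indexer service: the only public endpoint
# Admin surfaces (Graph Node 8000-8040, Indexer management API 18000,
# Postgres 5432) are covered by the default deny rule and should only be
# reachable over a private network or an SSH tunnel.
sudo ufw enable
```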
#### گراف نوڈ -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(سب گراف کی کیوریز کے لیے) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(سب گراف سبسکرپشنز کے لیے) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(تعیناتیوں کے انتظام کے لیے) | / | --admin-port | - | -| 8030 | سب گراف انڈیکسنگ اسٹیٹس API | /graphql | --index-node-port | - | -| 8040 | Prometheus میٹرکس | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -#### انڈیکسر سروس +#### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(ادا شدہ سب گراف کی کیوریز کے لیے) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus میٹرکس | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### انڈیکسر ایجنٹ +#### Indexer Agent -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | انڈیکسر مینجمنٹ API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### گوگل کلاؤڈ پر ٹیرا فورم کا استعمال کرتے ہوئے سرور کا بنیادی ڈھانچہ ترتیب دیں +### Setup server infrastructure using Terraform on Google Cloud -> نوٹ: انڈیکسرز متبادل طور پر AWS، Microsoft Azure، یا Alibaba استعمال کر سکتے ہیں. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### اولین ضروریات کو انسٹال کریں +#### Install prerequisites - Google Cloud SDK - Kubectl command line tool - Terraform -#### گوگل کلاؤڈ پروجیکٹ بنائیں +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ query indexerAllocations { cd terraform ``` -- گوگل کلاؤڈ کے ساتھ تصدیق کریں اور ایک نیا پروجیکٹ بنائیں. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- نئے پروجیکٹ کے لیے بلنگ کو فعال کرنے کے لیے گوگل کلاؤڈ کنسول کا بلنگ والا صفحہ استعمال کریں. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- گوگل کلاؤڈ کنفگریشن بنائیں. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- مطلوبہ گوگل کلاؤڈ APIs کو فعال کریں. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- ایک سروس اکاؤنٹ بنائیں. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- ڈیٹا بیس اور Kubernetes کلسٹر کے درمیان پیئرنگ کو فعال کریں جو اگلے مرحلے میں بنائے جائیں گے. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,22 +249,22 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- کم سے کم terraform کنفیگریشن فائل بنائیں (ضرورت کے مطابق اپ ڈیٹ کریں). +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < **نوٹ**: تمام runtime کنفیگریشن variables یا تو startup پر کمانڈ پر پیرامیٹرز کے طور پر لاگو کیے جا سکتے ہیں یا `COMPONENT_NAME_VARIABLE_NAME` format کے environment variables کا استعمال کرتے ہوئے (مثال کے طور پر `INDEXER_AGENT_ETHEREUM`). +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). 
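As a quick illustration of that note, the same setting can be supplied either way. A sketch, assuming the agent accepts an `--ethereum` flag corresponding to the `INDEXER_AGENT_ETHEREUM` variable named above; the endpoint value is a placeholder:

```sh
# Flag form, passed on startup (other required flags omitted):
graph-indexer-agent start --ethereum http://localhost:8545 # ...

# Environment-variable form, following the COMPONENT_NAME_VARIABLE_NAME convention:
export INDEXER_AGENT_ETHEREUM=http://localhost:8545
graph-indexer-agent start # ...
```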
-#### انڈیکسر ایجنٹ +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### انڈیکسر سروس +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -514,58 +514,58 @@ graph-indexer-service start \ | pino-pretty ``` -#### انڈیکسر CLI +#### Indexer CLI -Indexer CLI [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) کے لیے ایک plugin ہے جس کو `گراف انڈیکسر` پر terminal میں قابل رسائی حاصل ہے. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### انڈیکسر CLI کا استعمال کرتے ہوئے انڈیکسر کا انتظام +#### Indexer management using Indexer CLI -**انڈیکسر مینجمنٹ API** کے ساتھ تعامل کے لیے تجویز کردہ ٹول **انڈیکسر CLI** ہے، جو کہ **گراف CLI** کی توسیع ہے. انڈیکسر ایجنٹ کو انڈیکسر سے ان پٹ کی ضرورت ہوتی ہے تاکہ انڈیکسر کی جانب سے نیٹ ورک کے ساتھ خود مختاری سے تعامل کیا جا سکے. انڈیکسر ایجنٹ کے رویے کی وضاحت کرنے کا طریقہ کار **مختص کا انتظام** موڈ اور **انڈیکسنگ کے قواعد** ہیں. آٹو موڈ میں، ایک انڈیکسر **انڈیکسنگ کے قواعد** کا استعمال کر سکتا ہے تاکہ انڈیکس اور کیوری پیش کرنے میں سب گرافوں کو چننے کے لیے اپنی مخصوص حکمت عملی کو لاگو کیا جا سکے. اصولوں کا نظم ایک GraphQL API کے ذریعے کیا جاتا ہے جو ایجنٹ کے ذریعہ پیش کیا جاتا ہے اور اسے انڈیکسر مینیجمینٹ API کے نام سے جانا جاتا ہے. دستی موڈ میں، ایک انڈیکسر **اعمال کی قطار** کا استعمال کرتے ہوئے مختص کی کارروائیاںمختص کی کارروائیاں بنا سکتا ہے اور ان کے چلنے سے پہلے انہیں واضح طور پر منظور کر سکتا ہے۔ نگرانی کے موڈ میں، **انڈیکسنگ کے قواعد** کا استعمال **اعمال کی قطار** کو آباد کرنے کے لیے کیا جاتا ہے اور اس کو چلانے کے لیے واضح منظوری بھی درکار ہوتی ہے. +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### استعمال +#### Usage -**انڈیکسر CLI** انڈیکسر ایجنٹ سے جڑتی ہے، عام طور پر پورٹ فارورڈنگ کے ذریعے، لہذا CLI کو ایک ہی سرور یا کلسٹر پر چلانے کی ضرورت نہیں ہے. شروع کرنے میں آپ کی مدد کرنے کے لیے، اور کچھ سیاق و سباق فراہم کرنے کے لیے، CLI کو یہاں مختصراً بیان کیا جائے گا. +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `graph indexer connect ` - انڈیکسر مینیجمینٹ API سے جڑیں. عام طور پر سرور سے کنکشن, پورٹ فارورڈنگ کے ذریعے کھولا جاتا ہے، لہذا CLI آسانی سے دور سے چلایا جا سکتا ہے. (مثال: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. 
Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - تمام قواعد حاصل کرنے کے لیے `all` کو بطور `` استعمال کرتے ہوئے ایک یا زیادہ انڈیکسر کے قواعد حاصل کریں، یا عالمی ڈیفالٹس حاصل کرنے کے لیے `global` کو استعمال کریں. ایک اضافی دلیل `--merged` کو یہ بتانے کے لیے استعمال کیا جا سکتا ہے کہ تعیناتی کے مخصوص اصول عالمی اصول کے ساتھ ضم ہو گئے ہیں. انڈیکسر ایجنٹ میں ان کا اطلاق اس طرح ہوتا ہے. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - ایک یا اس سے زیادہ انڈیکسنگ کے اصول مرتب کریں. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `graph indexer rules start [options] ` - اگر دستیاب ہو تو سب گراف کی تعیناتی کو انڈیکس کرنا شروع کریں اور اس کا `decisionBasis` کو `Always` پر سیٹ کریں، اس لیے انڈیکسر ایجنٹ ہمیشہ اسے انڈیکس کرنے کا انتخاب کرے گا. اگر عالمی اصول کو ہمیشہ پر سیٹ کیا جاتا ہے تو نیٹ ورک پر دستیاب تمام سب گرافس کو انڈیکس کیا جائے گا. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `graph indexer rules stop [options] ` - کسی تعیناتی کو انڈیکس کرنا بند کریں اور اس کا `decisionBasis` never پر سیٹ کریں، لہذا یہ انڈیکس میں تعیناتیوں کا فیصلہ کرتے وقت اس تعیناتی کو چھوڑ دے گا. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `graph indexer rules maybe [options] ` — تعیناتی کے لیے `decisionBasis` کو `rules` پر سیٹ کریں، تاکہ انڈیکسر ایجنٹ یہ فیصلہ کرنے کے لیے انڈیکسنگ کے اصول استعمال کرے کہ آیا اس تعیناتی کو انڈیکس کرنا ہے. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. 
-- `graph indexer action queue allocate ` - قطار(Queue) مختص کرنے کی کارروائی +- `graph indexer action queue allocate ` - Queue allocation action - `graph indexer action queue reallocate ` - Queue reallocate action - `graph indexer action queue unallocate ` - Queue unallocate action -- `graph indexer actions cancel [ ...]` - اگر id غیر متعین ہے تو queue میں موجود تمام کارروائیوں کو منسوخ کریں، بصورت دیگر seperator کے طور پر space کے ساتھ id کی array کو منسوخ کریں +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator -- `graph indexer actions approve [ ...]` - عملدرآمد کے لیے متعدد کارروائیوں کو منظور کریں +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution -- `graph indexer actions execute approve` - کارکن کو فوری طور پر منظور شدہ کارروائیوں کو انجام دینے پر مجبور کریں +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -تمام کمانڈز جو output میں قواعد کو ظاہر کرتی ہیں وہ `-output` دلیل(argument) کا استعمال کرتے ہوئے معاون output فارمیٹس (`table`، `yaml`، اور `json`) کے درمیان انتخاب کرسکتے ہیں. +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### انڈیکسنگ کے قواعد +#### Indexing rules -انڈیکسنگ کے قوانین یا تو عالمی ڈیفالٹس کے طور پر لاگو کیے جا سکتے ہیں یا ان کی IDs کا استعمال کرتے ہوئے مخصوص سب گراف کی تعیناتیوں کے لیے۔ `deployment` اور `decisionBasis` فیلڈز لازمی ہیں، جبکہ دیگر تمام فیلڈز اختیاری ہیں. جب انڈیکسنگ کے اصول میں `rules` بطور `decisionBasis` ہوتا ہے، تو انڈیکسر ایجنٹ اس اصول پر non-null حد کی قدروں کا موازنہ متعلقہ تعیناتی کے لیے نیٹ ورک سے حاصل کردہ اقدار سے کرے گا. اگر سب گراف کی تعیناتی میں کسی بھی حد سے اوپر (یا نیچے) قدریں ہیں تو اسے انڈیکسنگ کے لیے منتخب کیا جائے گا. +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -مثال کے طور پر، اگر عالمی اصول میں `minStake` **5** (GRT) ہے، تو کسی بھی سب گراف کی تعیناتی جس میں 5 (GRT) سے زیادہ سٹیک مختص کیا گیا ہے، انڈیکس کیا جائے گا. حد کے اصولوں میں شامل ہیں `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, اور `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. -ڈیٹا ماڈل: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -انڈیکسنگ کے اصول کے استعمال کی مثال: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -613,18 +613,18 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK #### Actions queue CLI -Indexer-cli کارروائی کی قطار کے ساتھ دستی طور پر کام کرنے کے لیے ایک `actions` ماڈیول فراہم کرتا ہے. 
یہ کارروائی کی قطارe کے ساتھ تعامل کرنے کے لیے انڈیکسر مینیجمینٹ سرور کے زیر اہتمام **Graphql API** کا استعمال کرتا ہے. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -عمل پر عملدرآمد کرنے والا کارکن صرف اس صورت میں قطار سے آئٹمز کو پکڑے گا جب ان کے پاس `ActionStatus = approved` ہو. تجویز کردہ راستے میں کارروائیوں کو ActionStatus = queued کے ساتھ قطار میں شامل کیا جاتا ہے، لہذا ان کو آن چین پر عمل درآمد کرنے کے لیے منظور ہونا ضروری ہے۔ عام بہاؤ اس طرح نظر آئے گا: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- فریق ثالث کا اصلاح کنندہ ٹول یا indexer-cli صارف کے ذریعے قطار میں ایکشن شامل کیا گیا -- انڈیکسر تمام queued کارروائیوں کو دیکھنے کے لیے `indexer-cli` استعمال کر سکتا ہے -- انڈیکسر (یا دیگر سافٹ ویئر) `indexer-cli` کا استعمال کرتے ہوئے queue میں کارروائیوں کو منظور یا منسوخ کر سکتا ہے۔ approve اور cancel کی کمانڈز کے بطور ان پٹ action ids کی ایک ایرے لیتی ہیں. -- Execution worker باقاعدگی سے منظور شدہ کارروائیوں کے لیے قطار میں پولنگ کرتا ہے۔ یہ قطار سے `approved` کارروائیوں کو پکڑے گا، ان کو چلانے کی کوشش کرے گا، اور db میں اقدار کو `success` یا `failed` پر عمل درآمد کی حیثیت کے لحاظ سے اپ ڈیٹ کرے گا. -- اگر کوئی کارروائی کامیاب ہوتی ہے تو کارکن اس بات کو یقینی بنائے گا کہ ایک انڈیکسنگ کا اصول موجود ہے جو ایجنٹ کو بتاتا ہے کہ ایلوکیشن کو آگے بڑھنے کا طریقہ کس طرح منظم کرنا ہے، جب ایجنٹ `auto` یا `oversight` موڈ میں ہوتا ہے تو دستی کارروائی کرتے وقت مفید ہوتا ہے. -- انڈیکسر ایکشن کے عمل کی تاریخ دیکھنے کے لیے action queue کی نگرانی کر سکتا ہے اور اگر ضرورت ہو تو ایکشن کے اجزاء کو دوبارہ منظور اور اپ ڈیٹ کر سکتا ہے اگر وہ عمل درآمد میں ناکام رہے. action queue تمام queued اور taken اعمال کی تاریخ فراہم کرتی ہے. +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
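That flow maps onto a short command sequence. A sketch using the indexer-cli commands documented in this section; the action ids are placeholders:

```sh
# Inspect what is currently waiting in the queue
graph indexer actions get --status queued

# Approve the actions that should be executed onchain
graph indexer actions approve 1 3 5

# Optionally force the worker to run approved actions immediately
graph indexer actions execute approve
```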
-ڈیٹا ماڈل: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -سورس سے استعمال کی مثال: +Example usage from source: ```bash graph indexer actions get all @@ -677,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -نوٹ کریں کہ ایلوکیشن کے انتظام کے لیے معاون کارروائی کی اقسام میں مختلف ان پٹ تقاضے ہوتے ہیں: +Note that supported action types for allocation management have different input requirements: -- `Allocate` - ایک مخصوص سب گراف کی تعیناتی میں سٹیک مختص کریں +- `Allocate` - allocate stake to a specific subgraph deployment - - مطلوبہ ایکشن params: + - required action params: - deploymentID - amount -- `Unallocate` - ایلوکیشن بند کریں، دوسری جگہوں پر دوبارہ مختص کرنے کے لیے سٹیک کو آزاد کریں +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - مطلوبہ ایکشن params: + - required action params: - allocationID - deploymentID - - اختیاری ایکشن params: + - optional action params: - poi - - فورس (فراہم کردہ POI استعمال کرنے والی فورس یہاں تک کہ اگر یہ گراف نوڈ کے فراہم کردہ سے مماثل نہیں ہے) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - جوہری طور پر ایلوکیشن کو بند کریں اور اسی سب گراف کی تعیناتی کے لیے ایک تازہ ایلوکیشن کھولیں +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - مطلوبہ ایکشن params: + - required action params: - allocationID - deploymentID - amount - - اختیاری ایکشن params: + - optional action params: - poi - - فورس (فراہم کردہ POI استعمال کرنے والی فورس یہاں تک کہ اگر یہ گراف نوڈ کے فراہم کردہ سے مماثل نہیں ہے) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### لاگت کے ماڈل +#### Cost models -لاگت کے ماڈل مارکیٹ اور کیوری کی خصوصیات پر مبنی کیوریز کے لیے متحرک قیمت فراہم کرتے ہیں. انڈیکسر سروس ہر سب گراف کے گیٹ ویز کے ساتھ لاگت ماڈل شیئر کرتی ہے جس کے لیے وہ کیوریز کا جواب دینا چاہتے ہیں. گیٹ وے، بدلے میں، فی کیوری انڈیکسر کے انتخاب کے فیصلے کرنے اور منتخب کردہ انڈیکسرز کے ساتھ ادائیگی پر بات چیت کرنے کے لیے لاگت ماڈلز کا استعمال کرتے ہیں. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Agora زبان کیوریز کے لیے لاگت ماڈل بنانے کے لیے ایک لچکدار فارمیٹ فراہم کرتی ہے. Agora لاگت کے ماڈل سٹیٹمنٹس کا ایک سلسلہ ہے جو GraphQL کیوری میں ہر اعلیٰ درجے کی کیوری کے لیے ترتیب دیتا ہے. ہر top-level کیوری کے لیے، پہلا بیان جو اس سے ملتا ہے اس کیوری کی قیمت کا تعین کرتا ہے. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -ایک بیان ایک پیشن گوئی پر مشتمل ہوتی ہے، جو GraphQL کی کیوریز کے ملاپ کے لیے استعمال ہوتی ہے، اور ایک cost expression جس کا جب اندازہ کیا جاتا ہے تو اعشاریہ GRT میں لاگت نکلتی ہے. کیوری کی نامزد argument پوزیشن میں اقدار کو پیش گوئی میں پکڑا جاسکتا ہے اور expression میں استعمال کیا جاسکتا ہے. Globals کو ایک expression میں placeholders کے لیے بھی سیٹ اور متبادل کیا جا سکتا ہے. 
+A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -مثال کا cost model: +Example cost model: ``` # This statement captures the skip value, @@ -727,64 +727,64 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -مندرجہ بالا ماڈل کا استعمال کرتے ہوئے کیوری کی قیمت کی مثال: +Example query costing using the above model: -| کیوری | قیمت | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Cost ماڈل لاگو کرنا +#### Applying the cost model -لاگت کے ماڈلز کا اطلاق انڈیکسر CLI کے ذریعے کیا جاتا ہے، جو ڈیٹا بیس میں ذخیرہ کرنے کے لیے انڈیکسر ایجنٹ کے انڈیکسر مینجمنٹ API کو بھیجتا ہے. اس کے بعد انڈیکسر سروس انہیں اٹھائے گی اور لاگت کے ماڈلز کو گیٹ ویز پر پیش کرے گی جب بھی وہ ان سے پوچھیں گے. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## نیٹ ورک کے ساتھ تعامل +## Interacting with the network -### پروٹوکول میں حصہ ڈالنا +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -ایک بار جب ایک انڈیکسر نے پروٹوکول میں GRT کو سٹیک کر دیا ہے، تو [انڈیکسر اجزاء](/indexing/overview/#indexer-components) کو شروع کیا جا سکتا ہے اور نیٹ ورک کے ساتھ اپنے تعاملات شروع کر سکتے ہیں. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### ٹوکنز منظور کریں +#### Approve tokens -1. [Remix ایپ ](https://remix.ethereum.org/) کو ایک براؤزر میں کھولیں +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. `File Explorer` میں [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json) کے ساتھ **GraphToken.abi** کے نام سے ایک فائل بنائیں. +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. ماحول کے نیچے `Injected Web3` کو منتخب کریں اور `Account` کے نیچے اپنا انڈیکسر ایڈریس منتخب کریں. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. 
گراف ٹوکن کے کنٹریکٹ ایڈریس کو سیٹ کریں - گراف ٹوکن کنٹریکٹ ایڈریس (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) کو `At Address` کے آگے چسپاں کریں اور لاگو کرنے کے لیے `At address` بٹن پر کلک کریں. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. اسٹیکنگ کنٹریکٹ کو منظور کرنے کے لیے `approve(spender, amount)` فنکشن کو کال کریں. `spender` کو اسٹیکنگ کنٹریکٹ ایڈریس (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) اور `amount` کو داؤ پر لگانے والے ٹوکنز کے ساتھ (wei میں) بھریں. +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### سٹیک ٹوکنز +#### Stake tokens -1. [Remix ایپ ](https://remix.ethereum.org/) کو ایک براؤزر میں کھولیں +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. `File Explorer` میں اسٹیکنگ ABI کے ساتھ **Staking.abi** کے نام سے ایک فائل بنائیں. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. -4. ماحول کے نیچے `Injected Web3` کو منتخب کریں اور `Account` کے نیچے اپنا انڈیکسر کے ایڈریس کو منتخب کریں. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. اسٹیکنگ کنٹریکٹ ایڈریس سیٹ کریں - اسٹیکنگ کنٹریکٹ ایڈریس (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) کو `At Address` کے آگے چسپاں کریں اور لاگو کرنے کے لیے `At address` بٹن پر کلک کریں. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. پروٹوکول میں GRT کو داؤ پر لگانے کے لیے `stake()` کو کال کریں. +6. Call `stake()` to stake GRT in the protocol. -7. (اختیاری) انڈیکسرز اپنے انڈیکسر انفراسٹرکچر کے لیے آپریٹر بننے کے لیے ایک اور ایڈریس کی منظوری دے سکتے ہیں تاکہ فنڈز کو کنٹرول کرنے والی کلیدوں کو ان سے الگ کر سکیں جو روزمرہ کے کاموں کو انجام دے رہے ہیں جیسے سب گراف کو مختص کرنا اور (ادائیگی) کیوریز پیش کرنا. آپریٹر کو سیٹ کرنے کے لیے آپریٹر ایڈریس کے ساتھ `setOperator()` کال کریں. +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. -8. (اختیاری) انعامات کی تقسیم کو کنٹرول کرنے اور اسٹریٹجک طور پر ڈیلیگیٹرز کو راغب کرنے کے لیے انڈیکسرز اپنے indexingRewardCut (پارٹس فی ملین)، queryFeeCut (پارٹس فی ملین) اور cooldownBlocks (بلاکس کی تعداد) کو اپ ڈیٹ کرکے اپنے ڈیلیگیشن پیرامیٹرز کو اپ ڈیٹ کرسکتے ہیں. ایسا کرنے کے لیے `setDelegationParameters()` کو کال کریں۔ درج ذیل مثال queryFeeCut کو سیٹ کرتی ہے کہ کیوری کی چھوٹ کا 95% انڈیکسر کو اور 5% ڈیلیگیٹرز کو تقسیم کرے، IndexingRewardCut کو سیٹ کریں کہ 60% انڈیکسنگ ریوارڈز انڈیکسر کو اور 40% ڈیلیگیٹرز کو تقسیم کریں، اور `thecooldownBlocks` کا دورانیہ 500 بلاکس سیٹ کریں. +8. 
(Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

To set the delegation parameters using the Graph Explorer interface, follow these steps:

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### ایک ایلوکیشن کا دورانیہ
+### The life of an allocation

After being created by an Indexer, a healthy allocation goes through two states.

- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI), their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-انڈیکسرز کو تجویز کی جاتی ہے کہ وہ offchain مطابقت پذیری کی فعالیت کو استعمال کریں تاکہ on-chain ایلوکیشن سے پہلے سب گراف کی تعیناتیوں کو chainhead سے ہم آہنگ کیا جا سکے. یہ خصوصیت خاص طور پر ان سب گرافوں کے لیے مفید ہے جن کی مطابقت پذیری میں 28 ایپوک سے زیادہ وقت لگ سکتا ہے یا غیر یقینی طور پر ناکام ہونے کے کچھ امکانات ہوتے ہیں.
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing non-deterministically.
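A sketch of that recommended sequence with the Indexer CLI, assuming `offchain` is accepted as a `decisionBasis` value (as the `rules offchain` shorthand shown earlier suggests); the deployment ID below is the example ID used earlier in this document, and the allocation amount is a placeholder:

```sh
# 1) Sync the deployment offchain first: no allocation is open yet, so no
#    rewards are at risk while it catches up to chainhead.
graph indexer rules set QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK decisionBasis offchain

# 2) Once it is synced, queue (and later approve) an onchain allocation for it.
graph indexer action queue allocate QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK 5000
```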
From b1495a07a91034f619157fa4f908fe1d903b75b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:53 -0500 Subject: [PATCH 0087/1534] New translations overview.mdx (Vietnamese) --- website/src/pages/vi/indexing/overview.mdx | 284 ++++++++++----------- 1 file changed, 142 insertions(+), 142 deletions(-) diff --git a/website/src/pages/vi/indexing/overview.mdx b/website/src/pages/vi/indexing/overview.mdx index ac39e292a2ec..9ad6f8fd18d9 100644 --- a/website/src/pages/vi/indexing/overview.mdx +++ b/website/src/pages/vi/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: Indexing +title: Indexing Overview +sidebarTitle: Tổng quan --- Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. @@ -8,7 +9,7 @@ GRT that is staked in the protocol is subject to a thawing period and can be sla Indexer chọn các subgraph để index dựa trên tín hiệu curation của subgraph, trong đó Curator stake GRT để chỉ ra subgraph nào có chất lượng cao và cần được ưu tiên. Bên tiêu dùng (ví dụ: ứng dụng) cũng có thể đặt các tham số (parameter) mà Indexer xử lý các truy vấn cho các subgraph của họ và đặt các tùy chọn cho việc định giá phí truy vấn. -## CÂU HỎI THƯỜNG GẶP +## FAQ ### What is the minimum stake required to be an Indexer on the network? @@ -26,11 +27,11 @@ Indexing rewards come from protocol inflation which is set to 3% annual issuance Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### Bằng chứng lập chỉ mục (proof of indexing - POI) là gì? +### What is a proof of indexing (POI)? POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### Khi nào Phần thưởng indexing được phân phối? +### When are indexing rewards distributed? Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). @@ -38,7 +39,7 @@ Allocations are continuously accruing rewards while they're active and allocated The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. 
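For a quick manual check without the Etherscan UI, a read-only call from the command line also works. A sketch, assuming Foundry's `cast` is installed and that the mainnet contract exposes the single-argument `getRewards(address)` signature; the allocation ID and RPC URL are placeholders:

```sh
# Read the pending rewards for one allocation from the RewardsManager proxy.
cast call 0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66 \
  "getRewards(address)(uint256)" \
  <ALLOCATION_ID> \
  --rpc-url <ETH_RPC_URL>
```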
-Nhiều trang tổng quan (dashboard) do cộng đồng tạo bao gồm các giá trị phần thưởng đang chờ xử lý và bạn có thể dễ dàng kiểm tra chúng theo cách thủ công bằng cách làm theo các bước sau:
+Many of the community-made dashboards include pending rewards values, and they can easily be checked manually by following these steps:

1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations:

```graphql
query indexerAllocations {
  indexer(id: "<INDEXER_ADDRESS>") {
    allocations {
      activeForIndexer {
        allocations {
          id
        }
      }
    }
  }
}
```

-Sử dụng Etherscan để gọi `getRewards()`:
+Use Etherscan to call `getRewards()`:

-- Điều hướng đến [giao diện Etherscan đến hợp đồng Rewards](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)
-
-* Để gọi `getRewards()`:
+- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)
+- To call `getRewards()`:
  - Expand the **9. getRewards** dropdown.
-  - Nhập **allocationID** trong đầu vào.
+  - Enter the **allocationID** in the input.
  - Click the **Query** button.

-### Tranh chấp là gì và tôi có thể xem chúng ở đâu?
+### What are disputes and where can I view them?

-Các truy vấn và phần phân bổ của Indexer đều có thể bị tranh chấp trên The Graph trong thời gian tranh chấp. Thời hạn tranh chấp khác nhau, tùy thuộc vào loại tranh chấp. Truy vấn / chứng thực có cửa sổ tranh chấp 7 epoch (kỷ nguyên), trong khi phần phân bổ có 56 epoch. Sau khi các giai đoạn này trôi qua, không thể mở các tranh chấp đối với phần phân bổ hoặc truy vấn. Khi một tranh chấp được mở ra, các Fisherman yêu cầu một khoản stake tối thiểu là 10.000 GRT, sẽ bị khóa cho đến khi tranh chấp được hoàn tất và giải pháp đã được đưa ra. Fisherman là bất kỳ người tham gia mạng nào mà đã mở ra tranh chấp.
+An Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies depending on the type of dispute: queries/attestations have a 7-epoch dispute window, whereas allocations have a 56-epoch one. After these periods pass, disputes cannot be opened against either allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required from the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants who open disputes.

-Tranh chấp có **ba** kết quả có thể xảy ra, phần tiền gửi của Fisherman cũng vậy.
+Disputes have **three** possible outcomes, as does the deposit of the Fishermen.

-- Nếu tranh chấp bị từ chối, GRT do Fisherman gửi sẽ bị đốt, và Indexer tranh chấp sẽ không bị phạt cắt giảm (slashed).
-- Nếu tranh chấp được giải quyết dưới dạng hòa, tiền gửi của Fisherman sẽ được trả lại, và Indexer bị tranh chấp sẽ không bị phạt cắt giảm (slashed).
-- Nếu tranh chấp được chấp nhận, lượng GRT do Fisherman đã gửi sẽ được trả lại, Indexer bị tranh chấp sẽ bị cắt và Fisherman sẽ kiếm được 50% GRT đã bị phạt cắt giảm (slashed).
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed.
+- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed, and the Fishermen will earn 50% of the slashed GRT.
-Tranh chấp có thể được xem trong giao diện người dùng trong trang hồ sơ của Indexer trong mục `Tranh chấp`.
+Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab.

-### Các khoản hoàn phí truy vấn là gì và chúng được phân phối khi nào?
+### What are query fee rebates and when are they distributed?

Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect.

Once an allocation has been closed, the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function.

-### Cắt giảm phí truy vấn và cắt giảm phần thưởng indexing là gì?
+### What is query fee cut and indexing reward cut?

The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters.

- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed, with the other 5% going to the Delegators.

- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed, and the Delegators will split the other 5%.

### How do Indexers know which subgraphs to index?

-Indexer có thể tự phân biệt bản thân bằng cách áp dụng các kỹ thuật nâng cao để đưa ra quyết định index subgraph nhưng để đưa ra ý tưởng chung, chúng ta sẽ thảo luận một số số liệu chính được sử dụng để đánh giá các subgraph trong mạng:
+Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions, but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network:

-- **Tín hiệu curation** - Tỷ lệ tín hiệu curation mạng được áp dụng cho một subgraph cụ thể là một chỉ báo tốt về mức độ quan tâm đến subgraph đó, đặc biệt là trong giai đoạn khởi động khi khối lượng truy vấn đang tăng lên.
+- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query volume is ramping up.

-- **Phí truy vấn đã thu** - Dữ liệu lịch sử về khối lượng phí truy vấn được thu thập cho một subgraph cụ thể là một chỉ báo tốt về nhu cầu trong tương lai.
+- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand.

- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply.

-- **Subgraph không có phần thưởng indexing** - Một số subgraph không tạo ra phần thưởng indexing chủ yếu vì chúng đang sử dụng các tính năng không được hỗ trợ như IPFS hoặc vì chúng đang truy vấn một mạng khác bên ngoài mainnet.
Bạn sẽ thấy một thông báo trên một subgraph nếu nó không tạo ra phần thưởng indexing. +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -### Có các yêu cầu gì về phần cứng (hardware)? +### What are the hardware requirements? -- **Nhỏ** - Đủ để bắt đầu index một số subgraph, có thể sẽ cần được mở rộng. -- **Tiêu chuẩn** - Thiết lập mặc định, đây là những gì được sử dụng trong bản kê khai (manifest) triển khai mẫu k8s/terraform. +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. +- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. - **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Lớn** - Được chuẩn bị để index tất cả các subgraph hiện đang được sử dụng và phục vụ các yêu cầu cho lưu lượng truy cập liên quan. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Cài đặt | Postgres
(CPUs) | Postgres
(bộ nhớ tính bằng GB) | Postgres
(đĩa tính bằng TB) | VMs
(CPUs) | VMs
(bộ nhớ tính bằng GB) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Nhỏ | 4 | 8 | 1 | 4 | 16 | -| Tiêu chuẩn | 8 | 30 | 1 | 12 | 48 | -| Trung bình | 16 | 64 | 2 | 32 | 64 | -| Lớn | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -123,7 +123,7 @@ Indexer có thể tự phân biệt bản thân bằng cách áp dụng các k - **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## Cơ sở hạ tầng +## Infrastructure At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. @@ -135,50 +135,50 @@ At the center of an Indexer's infrastructure is the Graph Node which monitors th - **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -Lưu ý: Để hỗ trợ mở rộng quy mô nhanh, bạn nên tách các mối quan tâm về truy vấn và indexing giữa các nhóm node khác nhau: node truy vấn và node index. +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. -### Tổng quan về các cổng +### Ports overview > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Graph Node -| Cổng | Mục đích | Tuyến | Đối số CLI | Biến môi trường | -| --- | --- | --- | --- | --- | -| 8000 | Máy chủ GraphQL HTTP
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | --metrics-port | - |

+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

-#### Indexer Service
+#### Indexer Service

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus metrics | /metrics | --metrics-port | - |

+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- |
+| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | -#### Đại lý Indexer +#### Indexer Agent -| Cổng | Mục đích | Tuyến | Đối số CLI | Biến môi trường | -| ---- | ------------------- | ----- | ------------------------- | --------------------------------------- | -| 8000 | API quản lý Indexer | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Thiết lập cơ sở hạ tầng máy chủ bằng Terraform trên Google Cloud +### Setup server infrastructure using Terraform on Google Cloud > Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Cài đặt điều kiện tiên quyết +#### Install prerequisites - Google Cloud SDK -- Công cụ dòng lệnh Kubectl +- Kubectl command line tool - Terraform -#### Tạo một dự án Google Cloud +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Lưu ý: Để hỗ trợ mở rộng quy mô nhanh, bạn nên tách các mối cd terraform ``` -- Xác thực với Google Cloud và tạo một dự án mới. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,9 +196,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Sử dụng \[billing page\](billing page) của Google Cloud Consolde để cho phép thanh toán cho dự án mới. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Tạo một cấu hình Google Cloud. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Bật các API Google Cloud được yêu cầu. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Tạo một tài khoản dịch vụ. +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Bật tính năng ngang hàng (peering) giữa cơ sở dữ liệu và cụm Kubernetes sẽ được tạo trong bước tiếp theo. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,35 +249,35 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Tạo tệp cấu hình terraform tối thiểu (cập nhật nếu cần). +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < \ -f Dockerfile.indexer-service \ -t indexer-service:latest \ -# Đại lý Indexer +# Indexer agent docker build \ --build-arg NPM_TOKEN= \ -f Dockerfile.indexer-agent \ -t indexer-agent:latest \ ``` -- Chạy các thành phần +- Run the components ```sh docker run -p 7600:7600 -it indexer-service:latest ... @@ -451,15 +451,15 @@ docker run -p 18000:8000 -it indexer-agent:latest ... 
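Once both containers are running, a quick smoke test from the host can confirm that each component is listening where expected. This is a minimal sketch, assuming the default port mappings from the `docker run` commands above:

```sh
# The Indexer service should answer on its public port (7600)
curl -i http://localhost:7600/

# The Indexer management API should answer on the mapped agent port (18000)
curl -i http://localhost:18000/
```

If either request hangs or is refused, check the container output with `docker logs` before moving on.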
**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). -#### Sử dụng K8s and Terraform +#### Using K8s and Terraform See the [Setup Server Infrastructure Using Terraform on Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) section -#### Sử dụng +#### Usage -> **LƯU Ý**: Tất cả các biến cấu hình thời gian chạy có thể được áp dụng dưới dạng tham số cho lệnh khi khởi động hoặc sử dụng các biến môi trường của định dạng `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). -#### Đại lý Indexer +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Dịch vụ Indexer +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -516,7 +516,7 @@ graph-indexer-service start \ #### Indexer CLI -Indexer CLI là một plugin dành cho [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) có thể truy cập trong terminal tại `graph indexer`. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 @@ -527,7 +527,7 @@ graph indexer status The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Sử dụng +#### Usage The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. @@ -535,7 +535,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - Đặt một hoặc nhiều quy tắc indexing. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. 
- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. @@ -557,15 +557,15 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -Tất cả các lệnh hiển thị quy tắc trong đầu ra có thể chọn giữa các định dạng đầu ra được hỗ trợ (`table`, `yaml`, and `json`) bằng việc sử dụng đối số `-output`. +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### Các quy tắc indexing +#### Indexing rules Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -Ví dụ: nếu quy tắc chung có `minStake` của **5** (GRT), bất kỳ triển khai subgraph nào có hơn 5 (GRT) stake được phân bổ cho nó sẽ được index. Các quy tắc ngưỡng bao gồm `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, và `minAverageQueryFees`. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. -Mô hình dữ liệu: +Data model: ```graphql type IndexingRule { @@ -615,7 +615,7 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: - Action added to the queue by the 3rd party optimizer tool or indexer-cli user - Indexer can use the `indexer-cli` to view all queued actions @@ -624,7 +624,7 @@ The action execution worker will only grab items from the queue to execute if th - If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. - The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. 
The action queue provides a history of all actions queued and taken. -Mô hình dữ liệu: +Data model: ```graphql Type ActionInput { @@ -704,38 +704,38 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Các mô hình chi phí +#### Cost models Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -Ngôn ngữ Agora cung cấp một định dạng linh hoạt để khai báo các mô hình chi phí cho các truy vấn. Mô hình giá Agora là một chuỗi các câu lệnh thực thi theo thứ tự cho mỗi truy vấn cấp cao nhất trong một truy vấn GraphQL. Đối với mỗi truy vấn cấp cao nhất, câu lệnh đầu tiên phù hợp với nó xác định giá cho truy vấn đó. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -Một câu lệnh bao gồm một vị từ (predicate), được sử dụng để đối sánh các truy vấn GraphQL và một biểu thức chi phí mà khi được đánh giá sẽ xuất ra chi phí ở dạng GRT thập phân. Các giá trị ở vị trí đối số được đặt tên của một truy vấn có thể được ghi lại trong vị từ và được sử dụng trong biểu thức. Các Globals có thể được đặt và thay thế cho các phần giữ chỗ trong một biểu thức. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -Mô hình chi phí mẫu: +Example cost model: ``` -# Câu lệnh này ghi lại giá trị bỏ qua (skip), -# sử dụng biểu thức boolean trong vị từ để khớp với các truy vấn cụ thể sử dụng `skip` -# và một biểu thức chi phí để tính toán chi phí dựa trên giá trị `skip` và SYSTEM_LOAD global +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; -# Mặc định này sẽ khớp với bất kỳ biểu thức GraphQL nào. -# Nó sử dụng một Global được thay thế vào biểu thức để tính toán chi phí +# This default will match any GraphQL expression. +# It uses a Global substituted into the expression to calculate cost default => 0.1 * $SYSTEM_LOAD; ``` Example query costing using the above model: -| Truy vấn | Giá | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### Áp dụng mô hình chi phí +#### Applying the cost model Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. 
The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. @@ -744,9 +744,9 @@ indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Tương tác với mạng +## Interacting with the network -### Stake trong giao thức +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. @@ -754,23 +754,23 @@ The first steps to participating in the network as an Indexer are to approve the Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. -#### Phê duyệt các token +#### Approve tokens -1. Mở [Remix app](https://remix.ethereum.org/) trong một trình duyệt +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. Trong `File Explorer` tạo một tệp tên **GraphToken.abi** với [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). 3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. 4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Đặt địa chỉ hợp đồng GraphToken - Dán địa chỉ hợp đồng GraphToken(`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) kế bên `At Address` và nhấp vào nút `At address` để áp dụng. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. Gọi chức năng `approve(spender, amount)` để phê duyệt hợp đồng Staking. Điền phần `spender` bằng địa chỉ hợp đồng Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) và điền `amount` bằng số token để stake (tính bằng wei). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). -#### Stake các token +#### Stake tokens -1. Mở [Remix app](https://remix.ethereum.org/) trong một trình duyệt +1. Open the [Remix app](https://remix.ethereum.org/) in a browser 2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. @@ -778,9 +778,9 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexi 4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Đặt địa chỉ hợp đồng Staking - Dán địa chỉ hợp đồng Staking (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) kế bên `At Address` và nhấp vào nút `At address` để áp dụng. +5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. -6. Gọi lệnh `stake()` để stake GRT vào giao thức. +6. Call `stake()` to stake GRT in the protocol. 7. 
(Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day-to-day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator, call `setOperator()` with the operator address.

@@ -806,12 +806,12 @@ To set the delegation parameters using Graph Explorer interface, follow these st

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### The life of an allocation
+### The life of an allocation

After being created by an Indexer, a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI), their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or that have some chance of failing non-deterministically.
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or that have some chance of failing non-deterministically.
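As a minimal sketch of that recommended flow, using the Indexer CLI commands described earlier: the deployment ID, amount, and action id below are placeholders, and the `offchain` value for `decisionBasis` is an assumption to verify against your indexer-agent version.

```sh
# Sync the deployment offchain first, without opening an onchain allocation
# (assumes `offchain` is a supported decisionBasis value in your agent version)
graph indexer rules set QmExampleDeploymentId decisionBasis offchain

# Once the deployment has reached chainhead, queue the onchain allocation
graph indexer action queue allocate QmExampleDeploymentId 5000

# Then approve it so the execution worker picks it up
graph indexer actions approve <action-id>
```

The point is only the ordering: sync first, then allocate.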
From 0d7d3a78934629d98ea72e2e1f8309ef983be30e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:44:54 -0500
Subject: [PATCH 0088/1534] New translations overview.mdx (Marathi)

---
 website/src/pages/mr/indexing/overview.mdx | 198 ++++++++++------------
 1 file changed, 99 insertions(+), 99 deletions(-)

diff --git a/website/src/pages/mr/indexing/overview.mdx b/website/src/pages/mr/indexing/overview.mdx
index 5a996e1eff59..0849723cc4c9 100644
--- a/website/src/pages/mr/indexing/overview.mdx
+++ b/website/src/pages/mr/indexing/overview.mdx
@@ -1,5 +1,6 @@
 ---
-title: Indexing
+title: Indexing Overview
+sidebarTitle: सविश्लेषण
 ---

Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function.

@@ -16,29 +17,29 @@ The minimum stake for an Indexer is currently set to 100K GRT.

### What are the revenue streams for an Indexer?

-**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment, and the corresponding response a proof of query result validity.
+**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity.

-**Indexing rewards** - Generated via a 3% annual protocol-wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network.
+**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network.

### How are indexing rewards distributed?

-Indexing rewards come from protocol inflation, which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.**
+Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.**

Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack.

### What is a proof of indexing (POI)?
-POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block.
+POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block.

### When are indexing rewards distributed?

-Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force-close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the maximum allocation lifetime (right now, one epoch lasts for ~24h).
+Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h).

### Can pending indexing rewards be monitored?

The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation.

-Many of the community-made dashboards include pending rewards values, and they can be easily checked manually by following these steps:
+Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps:

1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations:

@@ -59,21 +60,20 @@ query indexerAllocations {

Use Etherscan to call `getRewards()`:

- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)

-* To call `getRewards()`:
+- To call `getRewards()`:
  - Expand the **9. getRewards** dropdown.
  - Enter the **allocationID** in the input.
  - Click the **Query** button.

-### What are disputes and where can I view them?
+### What are disputes and where can I view them?

-An Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies depending on the type of dispute: queries/attestations have a 7-epoch dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either allocations or queries. When a dispute is opened, a minimum deposit of 10,000 GRT is required from the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants that open disputes.
+An Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies depending on the type of dispute: queries/attestations have a 7-epoch dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either allocations or queries. When a dispute is opened, a minimum deposit of 10,000 GRT is required from the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fishermen are any network participants that open disputes.

Disputes have **three** possible outcomes, and so does the deposit of the Fishermen.

-- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
-- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed.
-- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed, and the Fishermen will earn 50% of the slashed GRT.
+- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
+- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed.
+- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT.

Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab.

@@ -85,7 +85,7 @@ Once an allocation has been closed the rebates are available to be claimed by th

### What is query fee cut and indexing reward cut?

-The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set, along with cooldownBlocks, to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters.
+The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters.

- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators.

- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%.

### How do Indexers know which subgraphs to index?

-Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions, but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network:
+Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network:

-- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query volume is ramping up.
+- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query volume is ramping up.

- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand.

-- **Amount staked** - Monitoring the behavior of other Indexers, or looking at proportions of total stake allocated towards specific subgraphs, can allow an Indexer to monitor the supply side for subgraph queries and identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply.
+- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply.

-- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards.
+- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards.

### What are the hardware requirements?

- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded.
- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests.
- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second.
- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic.

-| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) |
-| --- | :-: | :-: | :-: | :-: | :-: |
-| Small | 4 | 8 | 1 | 4 | 16 |
-| Standard | 8 | 30 | 1 | 12 | 48 |
-| Medium | 16 | 64 | 2 | 32 | 64 |
-| Large | 72 | 468 | 3.5 | 48 | 184 |

+| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) |
+| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: |
+| Small | 4 | 8 | 1 | 4 | 16 |
+| Standard | 8 | 30 | 1 | 12 | 48 |
+| Medium | 16 | 64 | 2 | 32 | 64 |
+| Large | 72 | 468 | 3.5 | 48 | 184 |

### What are some basic security precautions an Indexer should take?

-- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between the keys that control their stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions.
+- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions.

-- **Firewall** - Only the Indexer service needs to be exposed publicly, and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed.
+- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed.

## Infrastructure

-At the center of an Indexer's infrastructure is the Graph Node, which monitors the indexed networks, extracts and loads data per a subgraph definition, and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.
+At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network.

-- **PostgreSQL database** - The main store for the Graph Node; this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.
+- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions.

-- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.
+- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API.
This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API.

-- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node; an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.
+- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com.

-- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.
+- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway.

-- **Indexer agent** - Facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.
+- **Indexer agent** - Facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations.

-- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.
+- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server.

-Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.
+Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes.

### Ports overview

-> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below.

#### Graph Node

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

#### Indexer Service

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| बंदर | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ----------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | इंडेक्सर व्यवस्थापन API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | ### Setup server infrastructure using Terraform on Google Cloud @@ -256,13 +256,13 @@ indexer= cat > terraform.tfvars <बेस` एंट्री समायोजित करा /code> जेणेकरून ते निर्देशिकेकडे निर्देश करेल `k8s/base`. +- Copy the directory `k8s/overlays` to a new directory `$dir,` and adjust the `bases` entry in `$dir/kustomization.yaml` so that it points to the directory `k8s/base`. - Read through all the files in `$dir` and adjust any values as indicated in the comments. @@ -336,9 +336,9 @@ cargo run -p graph-node --release -- \ #### Getting started using Docker -#### पूर्वतयारी +#### Prerequisites -- **Ethereum नोड** - डीफॉल्टनुसार, डॉकर कंपोझ सेटअप मेननेट वापरेल: [http:// host.docker.internal:8545](http://host.docker.internal:8545) तुमच्या होस्ट मशीनवरील इथरियम नोडशी कनेक्ट करण्यासाठी. तुम्ही `docker-compose.yaml` अपडेट करून हे नेटवर्क नाव आणि url बदलू शकता. +- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`. #### Setup @@ -349,7 +349,7 @@ git clone https://github.com/graphprotocol/graph-node cd graph-node/docker ``` -2. फक्त लिनक्स वापरकर्त्यांसाठी - समाविष्ट स्क्रिप्ट वापरून `docker-compose.yaml` मध्ये `host.docker.internal` ऐवजी होस्ट IP पत्ता वापरा: +2. For linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yaml `using the included script: ```sh ./setup.sh @@ -363,13 +363,13 @@ docker-compose up ### Indexer components -फक्त लिनक्स वापरण्यासाठी - स्क्रिप्ट वापरून `docker-compose. yaml` मध्ये `host. docker. internal` समाविष्ट करा होस्ट आयपी पत्ता वापरा: +To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **इंडेक्सर एजंट** - एजंट नेटवर्क आणि इंडेक्सरच्या स्वतःच्या इन्फ्रास्ट्रक्चरचे निरीक्षण करतो आणि कोणते सबग्राफ डिप्लॉयमेंट अनुक्रमित केले जातात आणि साखळीवर वाटप केले जातात आणि प्रत्येकासाठी किती वाटप केले जाते हे व्यवस्थापित करतो. +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **इंडेक्सर सेवा** - बाहेरून उघड करणे आवश्यक असलेला एकमेव घटक, सेवा ग्राफ नोडवर सबग्राफ क्वेरी पास करते, क्वेरी पेमेंटसाठी राज्य चॅनेल व्यवस्थापित करते, महत्त्वपूर्ण निर्णय घेण्याची माहिती सामायिक करते गेटवे सारख्या ग्राहकांना. 
+- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. -- **इंडेक्सर CLI** - इंडेक्सर एजंट व्यवस्थापित करण्यासाठी कमांड लाइन इंटरफेस. हे इंडेक्सर्सना खर्चाचे मॉडेल, मॅन्युअल वाटप, क्रिया रांग आणि अनुक्रमणिका नियम व्यवस्थापित करण्यास अनुमती देते. +- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. #### Getting started @@ -455,9 +455,9 @@ docker run -p 18000:8000 -it indexer-agent:latest ... See the [Setup Server Infrastructure Using Terraform on Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) section -#### वापर +#### Usage -> **सूचना**: सर्व रनटाइम कॉन्फिगरेशन व्हेरिएबल्स एकतर स्टार्टअपवर कमांडवर पॅरामीटर्स म्हणून लागू केले जाऊ शकतात किंवा `COMPONENT_NAME_VARIABLE_NAME` फॉरमॅटचे पर्यावरण व्हेरिएबल्स वापरून (उदा. `INDEXER_AGENT_ETHEREUM`). +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). #### Indexer agent @@ -525,33 +525,33 @@ graph indexer status #### Indexer management using Indexer CLI -**इंडेक्सर मॅनेजमेंट API** सह संवाद साधण्यासाठी सुचवलेले साधन हे **इंडेक्सर CLI** आहे, जो **ग्राफ CLI** चा विस्तार आहे. इंडेक्सर एजंटला इंडेक्सरच्या वतीने नेटवर्कशी स्वायत्तपणे संवाद साधण्यासाठी इंडेक्सरकडून इनपुट आवश्यक आहे. इंडेक्सर एजंट वर्तन परिभाषित करण्याची यंत्रणा म्हणजे **अलोकेशन व्यवस्थापन** मोड आणि **इंडेक्सिंग नियम**. ऑटो मोड अंतर्गत, इंडेक्सर इंडेक्समध्ये सबग्राफ निवडण्यासाठी त्यांची विशिष्ट रणनीती लागू करण्यासाठी **इंडेक्सिंग नियम** वापरू शकतो आणि त्यांच्यासाठी क्वेरी देऊ शकतो. नियम एजंटद्वारे प्रदान केलेल्या GraphQL API द्वारे व्यवस्थापित केले जातात आणि इंडेक्सर व्यवस्थापन API म्हणून ओळखले जातात. मॅन्युअल मोड अंतर्गत, इंडेक्सर **क्रिया रांग** वापरून वाटप क्रिया तयार करू शकतो आणि ते कार्यान्वित होण्यापूर्वी त्यांना स्पष्टपणे मंजूर करू शकतो. ओव्हरसाइट मोड अंतर्गत, **अ‍ॅक्शन रांग** भरण्यासाठी **इंडेक्सिंग नियम** वापरले जातात आणि अंमलबजावणीसाठी स्पष्ट मंजूरी देखील आवश्यक असते. +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### वापर +#### Usage -**इंडेक्सर CLI** इंडेक्सर एजंटशी कनेक्ट होतो, विशेषत: पोर्ट-फॉरवर्डिंगद्वारे, त्यामुळे CLI ला समान सर्व्हर किंवा क्लस्टरवर चालण्याची आवश्यकता नाही. तुम्हाला सुरुवात करण्यात मदत करण्यासाठी आणि काही संदर्भ देण्यासाठी, CLI चे येथे थोडक्यात वर्णन केले जाईल. 
+The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `ग्राफ इंडेक्सर कनेक्ट ` - इंडेक्सर व्यवस्थापन API शी कनेक्ट करा. सामान्यत: सर्व्हरचे कनेक्शन पोर्ट फॉरवर्डिंगद्वारे उघडले जाते, त्यामुळे CLI सहजपणे दूरस्थपणे ऑपरेट केले जाऊ शकते. (उदाहरण: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) -- `ग्राफ इंडेक्सर नियमांना [options] [ ...]` - सर्व नियम मिळविण्यासाठी `सर्व` वापरून एक किंवा अधिक अनुक्रमणिका नियम मिळवा `` म्हणून, किंवा `ग्लोबल< /code> जागतिक डीफॉल्ट मिळविण्यासाठी. एक अतिरिक्त युक्तिवाद --merged` हे निर्दिष्ट करण्यासाठी वापरला जाऊ शकतो की तैनाती विशिष्ट नियम जागतिक नियमात विलीन केले जातात. इंडेक्सर एजंटमध्ये ते अशा प्रकारे लागू केले जातात. +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. - `graph indexer rules set [options] ...` - Set one or more indexing rules. -- `ग्राफ इंडेक्सर नियम सुरू करतात [options] ` - उपलब्ध असल्यास सबग्राफ उपयोजन अनुक्रमित करणे सुरू करा आणि त्याचा `निर्णय आधार` `नेहमी` वर सेट करा, त्यामुळे इंडेक्सर एजंट नेहमी त्याची अनुक्रमणिका निवडेल. जर जागतिक नियम नेहमी वर सेट केला असेल तर नेटवर्कवरील सर्व उपलब्ध सबग्राफ अनुक्रमित केले जातील. +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `ग्राफ इंडेक्सर नियम थांबवतात [options] ` - उपयोजन अनुक्रमित करणे थांबवा आणि त्याचा `decisionBasis` कधीही न करण्यासाठी सेट करा, त्यामुळे ते उपयोजनांवर निर्णय घेताना ही तैनाती वगळेल निर्देशांक. +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `ग्राफ इंडेक्सर नियम कदाचित [options] ` — उपयोजनासाठी `decisionBasis` सेट करा `नियम`, जेणेकरून इंडेक्सर एजंट हे उपयोजन अनुक्रमित करायचे की नाही हे ठरवण्यासाठी अनुक्रमणिका नियम वापरा. +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. -- `ग्राफ इंडेक्सर क्रिया रांग वाटप <अलोकेशन-रक्कम>` - रांग वाटप क्रिया +- `graph indexer action queue allocate ` - Queue allocation action - `graph indexer action queue reallocate ` - Queue reallocate action - `graph indexer action queue unallocate ` - Queue unallocate action -- `ग्राफ इंडेक्सर क्रिया रद्द करा [ ...] 
` - आयडी निर्दिष्ट न केल्यास रांगेतील सर्व क्रिया रद्द करा, अन्यथा विभाजक म्हणून स्पेससह आयडीचा अॅरे रद्द करा +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator - `graph indexer actions approve [ ...]` - Approve multiple actions for execution @@ -561,9 +561,9 @@ All commands which display rules in the output can choose between the supported #### Indexing rules -अनुक्रमणिका नियम एकतर जागतिक डीफॉल्ट म्हणून किंवा त्यांचे आयडी वापरून विशिष्ट सबग्राफ उपयोजनांसाठी लागू केले जाऊ शकतात. `डिप्लॉयमेंट` आणि `decisionBasis` फील्ड अनिवार्य आहेत, तर इतर सर्व फील्ड ऐच्छिक आहेत. जेव्हा इंडेक्सिंग नियमामध्ये `decisionBasis` म्हणून `नियम` असतात, तेव्हा इंडेक्सर एजंट त्या नियमावरील नॉन-नल थ्रेशोल्ड व्हॅल्यूजची तुलना संबंधित डिप्लॉयमेंटसाठी नेटवर्कमधून आणलेल्या मूल्यांशी करेल. जर सबग्राफ डिप्लॉयमेंटमध्ये कोणत्याही थ्रेशोल्डच्या वर (किंवा खाली) मूल्ये असतील तर ती अनुक्रमणिकेसाठी निवडली जाईल. +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -उदाहरणार्थ, जागतिक नियमामध्ये **5** (GRT) चा `minStake` असल्यास, 5 (GRT) पेक्षा जास्त स्टेक असलेली कोणतीही सबग्राफ डिप्लॉयमेंट त्याला वाटप अनुक्रमित केले जाईल. थ्रेशोल्ड नियमांमध्ये `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake` आणि `minAverageQueryFees` समाविष्ट आहेत. +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. Data model: @@ -613,16 +613,16 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK #### Actions queue CLI -इंडेक्सर-cli कृती रांगेसह मॅन्युअली कार्य करण्यासाठी `कृती` मॉड्यूल प्रदान करते. ते क्रिया रांगेशी संवाद साधण्यासाठी इंडेक्सर व्यवस्थापन सर्व्हरद्वारे होस्ट केलेले **Graphql API** वापरते. +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -क्रिया अंमलबजावणी कार्यकर्ता रांगेतील आयटम फक्त अंमलात आणण्यासाठी पकडेल जर त्यांच्याकडे `ActionStatus = मंजूर` असेल. शिफारस केलेल्या पथात क्रिया ActionStatus = रांगेत रांगेत जोडल्या जातात, त्यामुळे ऑन-चेन अंमलात आणण्यासाठी त्या नंतर मंजूर केल्या पाहिजेत. सामान्य प्रवाह असे दिसेल: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: - Action added to the queue by the 3rd party optimizer tool or indexer-cli user - Indexer can use the `indexer-cli` to view all queued actions -- इंडेक्सर (किंवा इतर सॉफ्टवेअर) `indexer-cli` वापरून रांगेतील क्रिया मंजूर किंवा रद्द करू शकतात. मंजूर आणि रद्द आदेश इनपुट म्हणून अॅक्शन आयडीचा अॅरे घेतात. -- अंमलबजावणी कर्मचारी नियमितपणे मंजूर कृतींसाठी रांगेत मतदान करतात. 
It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`.
  • एखादी कृती यशस्वी झाल्यास कार्यकर्ता खात्री करेल की एक अनुक्रमणिका नियम उपस्थित आहे जो एजंटला वाटप कसे व्यवस्थापित करावे हे सांगते, एजंट ऑटो` किंवा ` मध्ये असताना मॅन्युअल क्रिया करताना उपयुक्त oversight` मोड. -- इंडेक्सर कारवाईच्या अंमलबजावणीचा इतिहास पाहण्यासाठी कृती रांगेचे निरीक्षण करू शकतो आणि आवश्यक असल्यास क्रिया आयटमची अंमलबजावणी अयशस्वी झाल्यास पुन्हा मंजूर आणि अद्यतनित करू शकतो. कृती रांग रांगेत लावलेल्या आणि केलेल्या सर्व क्रियांचा इतिहास प्रदान करते. +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. Data model: @@ -706,25 +706,25 @@ Note that supported action types for allocation management have different input #### Cost models -किंमत मॉडेल बाजार आणि क्वेरी गुणधर्मांवर आधारित क्वेरीसाठी डायनॅमिक किंमत प्रदान करतात. इंडेक्सर सेवा प्रत्येक सबग्राफसाठी गेटवेसह किंमत मॉडेल सामायिक करते ज्यासाठी ते प्रश्नांना उत्तर देऊ इच्छितात. गेटवे, या बदल्यात, प्रति क्वेरी इंडेक्सर निवड निर्णय घेण्यासाठी आणि निवडलेल्या इंडेक्सर्ससह पेमेंटची वाटाघाटी करण्यासाठी किंमत मॉडेलचा वापर करतात. +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -अगोरा भाषा प्रश्नांसाठी किंमत मॉडेल घोषित करण्यासाठी एक लवचिक स्वरूप प्रदान करते. Agora किंमत मॉडेल हे विधानांचा एक क्रम आहे जो GraphQL क्वेरीमधील प्रत्येक उच्च-स्तरीय क्वेरीसाठी क्रमाने कार्यान्वित करतो. प्रत्येक उच्च-स्तरीय क्वेरीसाठी, त्याच्याशी जुळणारे पहिले विधान त्या क्वेरीसाठी किंमत निर्धारित करते. +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -स्टेटमेंटमध्ये प्रेडिकेटचा समावेश असतो, जो ग्राफक्यूएल क्वेरीशी जुळण्यासाठी वापरला जातो आणि किंमत एक्स्प्रेशन ज्याचे मूल्यमापन केल्यावर दशांश GRT मध्ये खर्च येतो. क्वेरीच्या नामित युक्तिवाद स्थितीतील मूल्ये प्रेडिकेटमध्ये कॅप्चर केली जाऊ शकतात आणि अभिव्यक्तीमध्ये वापरली जाऊ शकतात. अभिव्यक्तीमध्ये प्लेसहोल्डर्ससाठी ग्लोबल देखील सेट आणि बदलले जाऊ शकतात. +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. 
Example cost model: ``` -# हे विधान स्किप व्हॅल्यू कॅप्चर करते, -# 'वगळा' वापरणाऱ्या विशिष्‍ट क्‍वेरीशी जुळण्‍यासाठी प्रीडिकेटमध्‍ये बुलियन अभिव्‍यक्‍ती वापरते -# आणि `स्किप` मूल्य आणि SYSTEM_LOAD ग्लोबलवर आधारित खर्चाची गणना करण्यासाठी किंमत अभिव्यक्ती -क्वेरी { जोडी(वगळा: $skip) { id } } जेव्हा $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global +query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; -# हे डीफॉल्ट कोणत्याही GraphQL अभिव्यक्तीशी जुळेल. -# हे किंमत मोजण्यासाठी अभिव्यक्तीमध्ये बदललेल्या ग्लोबल वापरते -डीफॉल्ट => 0.1 * $SYSTEM_LOAD; +# This default will match any GraphQL expression. +# It uses a Global substituted into the expression to calculate cost +default => 0.1 * $SYSTEM_LOAD; ``` Example query costing using the above model: @@ -737,7 +737,7 @@ Example query costing using the above model: #### Applying the cost model -किमतीचे मॉडेल इंडेक्सर CLI द्वारे लागू केले जातात, जे त्यांना डेटाबेसमध्ये साठवण्यासाठी इंडेक्सर एजंटच्या इंडेक्सर मॅनेजमेंट API कडे पाठवतात. इंडेक्सर सेवा नंतर त्यांना उचलेल आणि गेटवेला जेव्हा ते मागतील तेव्हा किंमत मॉडेल सर्व्ह करेल. +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' @@ -752,7 +752,7 @@ The first steps to participating in the network as an Indexer are to approve the > Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). -एकदा इंडेक्सरने प्रोटोकॉलमध्ये GRT स्टेक केल्यानंतर, [इंडेक्सर घटक](/indexing/overview/#indexer-components) सुरू केले जाऊ शकतात आणि त्यांचे नेटवर्कशी परस्परसंवाद सुरू करू शकतात. +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. #### Approve tokens @@ -764,9 +764,9 @@ The first steps to participating in the network as an Indexer are to approve the 4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. ग्राफटोकन कॉन्ट्रॅक्ट अॅड्रेस सेट करा - ग्राफटोकन कॉन्ट्रॅक्ट अॅड्रेस पेस्ट करा (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) `At Address` च्या पुढे आणि लागू करण्यासाठी `At Address` बटणावर क्लिक करा. +5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. -6. स्टॅकिंग कॉन्ट्रॅक्टला मंजुरी देण्यासाठी `मंजूर(खर्च, रक्कम)` फंक्शनला कॉल करा. स्टॅकिंग कॉन्ट्रॅक्ट अॅड्रेससह `spender` भरा (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) आणि `रक्कम` शेअर करण्यासाठी टोकनसह (wei मध्ये). +6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). 
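The steps above use Remix, but the same approval can be scripted. The following is a minimal, unofficial sketch assuming ethers v6; `RPC_URL` and `PRIVATE_KEY` are placeholder environment variables you would supply, and the addresses are the mainnet contract addresses quoted above.

```typescript
// Unofficial sketch (assumes ethers v6): approve the Staking contract to spend GRT.
import { Contract, JsonRpcProvider, Wallet, parseUnits } from "ethers";

const GRAPH_TOKEN = "0xc944E90C64B2c07662A292be6244BDf05Cda44a7"; // GraphToken (mainnet)
const STAKING = "0xF55041E37E12cD407ad00CE2910B8269B01263b9"; // Staking (mainnet)

// Only the approve fragment of the token ABI is needed for this call.
const erc20Abi = ["function approve(address spender, uint256 amount) returns (bool)"];

async function approveStake(grt: string): Promise<void> {
  const provider = new JsonRpcProvider(process.env.RPC_URL);
  const signer = new Wallet(process.env.PRIVATE_KEY!, provider);
  const token = new Contract(GRAPH_TOKEN, erc20Abi, signer);

  // GRT uses 18 decimals, so parseUnits converts whole tokens to wei.
  const tx = await token.approve(STAKING, parseUnits(grt, 18));
  await tx.wait();
  console.log(`Approved ${grt} GRT for the Staking contract in tx ${tx.hash}`);
}

approveStake("100000").catch(console.error);
```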
#### Stake tokens

@@ -778,13 +778,13 @@ The first steps to participating in the network as an Indexer are to approve the

4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply.

6. Call `stake()` to stake GRT in the protocol.

7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day-to-day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator, call `setOperator()` with the operator address.

8. (Optional) In order to control the distribution of rewards and strategically attract Delegators, Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so, call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, sets the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and sets the `cooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

To set the delegation parameters using Graph Explorer interface, follow these steps:

@@ -810,8 +810,8 @@ To set the delegation parameters using Graph Explorer interface, follow these st

After being created by an Indexer a healthy allocation goes through two states.

- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**.
A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). -ऑन-चेन वाटप तयार करण्यापूर्वी चेनहेडमध्ये सबग्राफ उपयोजन समक्रमित करण्यासाठी ऑफचेन सिंकिंग कार्यक्षमता वापरण्याची शिफारस इंडेक्सर्सना केली जाते. हे वैशिष्ट्य विशेषतः उपग्राफसाठी उपयुक्त आहे ज्यांना समक्रमित करण्यासाठी 28 पेक्षा जास्त काळ लागू शकतो किंवा अनिश्चितपणे अयशस्वी होण्याची काही शक्यता असते. +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. From b12a30eb4df58e2807eb587487d73a3892b1a474 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:56 -0500 Subject: [PATCH 0089/1534] New translations overview.mdx (Hindi) --- website/src/pages/hi/indexing/overview.mdx | 376 ++++++++++----------- 1 file changed, 188 insertions(+), 188 deletions(-) diff --git a/website/src/pages/hi/indexing/overview.mdx b/website/src/pages/hi/indexing/overview.mdx index 2238eef1e04a..dfaed20e220c 100644 --- a/website/src/pages/hi/indexing/overview.mdx +++ b/website/src/pages/hi/indexing/overview.mdx @@ -1,5 +1,6 @@ --- -title: इंडेक्सिंग +title: Indexing का अवलोकन +sidebarTitle: अवलोकन --- Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. @@ -8,39 +9,39 @@ Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) i इंडेक्सर्स सबग्राफ के क्यूरेशन सिग्नल के आधार पर इंडेक्स के लिए सबग्राफ का चयन करते हैं, जहां क्यूरेटर GRT को यह इंगित करने के लिए दांव पर लगाते हैं कि कौन से सबग्राफ उच्च-गुणवत्ता वाले हैं और उन्हें प्राथमिकता दी जानी चाहिए। उपभोक्ता (उदाहरण के लिए अनुप्रयोग) पैरामीटर भी सेट कर सकते हैं जिसके लिए इंडेक्सर्स अपने सबग्राफ के लिए प्रश्नों को प्रोसेस करते हैं और क्वेरी शुल्क मूल्य निर्धारण के लिए वरीयताएँ निर्धारित करते हैं। -## सामान्य प्रश्न +## FAQ -### नेटवर्क पर इंडेक्सर बनने के लिए आवश्यक न्यूनतम हिस्सेदारी क्या है? +### What is the minimum stake required to be an Indexer on the network? -इंडेक्सर के लिए न्यूनतम हिस्सेदारी वर्तमान में 100K GRT पर सेट है। +The minimum stake for an Indexer is currently set to 100K GRT. -### एक इंडेक्सर के लिए राजस्व धाराएं क्या हैं? +### What are the revenue streams for an Indexer? 
-**प्रश्न शुल्क में छूट** - नेटवर्क पर प्रश्न प्रस्तुत करने के लिए भुगतान। ये भुगतान एक इंडेक्सर और गेटवे के बीच राज्य चैनलों के माध्यम से मध्यस्थ होते हैं। गेटवे से प्रत्येक क्वेरी अनुरोध में भुगतान होता है और संबंधित प्रतिक्रिया क्वेरी परिणाम वैधता का प्रमाण होती है। +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. -**इंडेक्सिंग रिवार्ड्स** - 3% वार्षिक प्रोटोकॉल व्यापक मुद्रास्फीति के माध्यम से उत्पन्न, इंडेक्सिंग पुरस्कार उन इंडेक्सर्स को वितरित किए जाते हैं जो नेटवर्क के लिए सबग्राफ तैनाती को इंडेक्स कर रहे हैं। +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. -### इंडेक्सिंग पुरस्कार कैसे वितरित किए जाते हैं? +### How are indexing rewards distributed? -अनुक्रमण पुरस्कार प्रोटोकॉल मुद्रास्फीति से आते हैं जो 3% वार्षिक जारी करने के लिए निर्धारित है। उन्हें प्रत्येक पर सभी क्यूरेशन सिग्नल के अनुपात के आधार पर सबग्राफ में वितरित किया जाता है, फिर उस सबग्राफ पर उनकी आवंटित हिस्सेदारी के आधार पर इंडेक्सर्स को आनुपातिक रूप से वितरित किया जाता है। **अनुक्रमण के एक वैध प्रमाण (POI) के साथ एक आवंटन बंद होना चाहिए जो पुरस्कारों के योग्य होने के लिए मध्यस्थता चार्टर द्वारा निर्धारित मानकों को पूरा करता है।** +Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. -### इंडेक्सिंग (पीओआई) का सबूत क्या है? +### What is a proof of indexing (POI)? -पीओआई का उपयोग नेटवर्क में यह सत्यापित करने के लिए किया जाता है कि एक इंडेक्सर उनके द्वारा आवंटित उपग्राफों को अनुक्रमित कर रहा है। इंडेक्सिंग पुरस्कारों के लिए पात्र होने के लिए उस आवंटन के आवंटन को बंद करते समय वर्तमान युग के पहले ब्लॉक के लिए एक पीओआई जमा किया जाना चाहिए। एक ब्लॉक के लिए एक पीओआई उस ब्लॉक तक और उस सहित एक विशिष्ट सबग्राफ परिनियोजन के लिए सभी एंटिटी स्टोर लेनदेन के लिए डाइजेस्ट है। +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### अनुक्रमण पुरस्कार कब वितरित किए जाते हैं? +### When are indexing rewards distributed? 
-आवंटन लगातार पुरस्कार अर्जित कर रहे हैं जबकि वे सक्रिय हैं और 28 युगों के भीतर आवंटित किए गए हैं। इंडेक्सर्स द्वारा पुरस्कार एकत्र किए जाते हैं, और जब भी उनका आवंटन बंद हो जाता है, वितरित किया जाता है। यह या तो मैन्युअल रूप से होता है, जब भी इंडेक्सर उन्हें बंद करना चाहता है, या 28 युगों के बाद एक प्रतिनिधि इंडेक्सर के आवंटन को बंद कर सकता है, लेकिन इसका परिणाम कोई पुरस्कार नहीं होता है। 28 युग अधिकतम आवंटन जीवनकाल है (अभी, एक युग ~ 24h तक रहता है)। +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). -### क्या लंबित अनुक्रमण पुरस्कारों की निगरानी की जा सकती है? +### Can pending indexing rewards be monitored? The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. -कई समुदाय-निर्मित डैशबोर्ड में लंबित पुरस्कार मान शामिल हैं और इन चरणों का पालन करके उन्हें आसानी से मैन्युअल रूप से चेक किया जा सकता है: +Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: -1. [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) से सभी सक्रिय आवंटनों के लिए IDs प्राप्त करने के लिए क्वेरी करें: +1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -56,121 +57,120 @@ query indexerAllocations { } ``` -`getRewards()` को कॉल करने के लिए इथरस्कैन का उपयोग करें: +Use Etherscan to call `getRewards()`: -- [इथरस्कैन इंटरफ़ेस टू रिवॉर्ड्स कॉन्ट्रैक्ट](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) पर नेविगेट करें - -* कॉल करने के लिए `getRewards()`: +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- To call `getRewards()`: - Expand the **9. getRewards** dropdown. - - इनपुट में **allocationID** दर्ज करें। - - **क्वेरी** बटन क्लिक करें. + - Enter the **allocationID** in the input. + - Click the **Query** button. -### विवाद क्या हैं और मैं उन्हें कहां देख सकता हूं? +### What are disputes and where can I view them? -विवाद की अवधि के दौरान इंडेक्सर की पूछताछ और आवंटन दोनों को ग्राफ़ पर विवादित किया जा सकता है। विवाद के प्रकार के आधार पर विवाद की अवधि अलग-अलग होती है। प्रश्न/सत्यापन में 7 युग विवाद विंडो हैं, जबकि आवंटन में 56 युग हैं। इन अवधियों के बीत जाने के बाद, आवंटन या प्रश्नों में से किसी के विरुद्ध विवाद नहीं खोला जा सकता है। जब कोई विवाद खोला जाता है, तो मछुआरों द्वारा न्यूनतम 10,000 जीआरटी की जमा राशि की आवश्यकता होती है, जो विवाद को अंतिम रूप देने और समाधान दिए जाने तक बंद रहेगा। मछुआरे कोई भी नेटवर्क प्रतिभागी हैं जो विवाद खोलते हैं। +Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. 
Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. -विवादों के **तीन** संभावित परिणाम होते हैं, वैसे ही मछुआरों की जमा राशि भी होती है। +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. -- यदि विवाद को खारिज कर दिया जाता है, तो मछुआरों द्वारा जमा किए गए जीआरटी को जला दिया जाएगा और विवादित इंडेक्सर को नहीं काटा जाएगा। -- यदि विवाद ड्रा के रूप में सुलझाया जाता है, तो मछुआरों की जमा राशि वापस कर दी जाएगी, और विवादित इंडेक्सर को नहीं काटा जाएगा। -- यदि विवाद स्वीकार किया जाता है, तो मछुआरों द्वारा जमा किया गया जीआरटी वापस कर दिया जाएगा, विवादित इंडेक्सर को घटा दिया जाएगा और मछुआरे घटे हुए जीआरटी का 50% अर्जित करेंगे। +- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. +- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. +- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. -`विवाद` टैब के अंतर्गत इंडेक्सर के प्रोफ़ाइल पृष्ठ में UI में विवादों को देखा जा सकता है। +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -### प्रश्न शुल्क छूट क्या हैं और वे कब वितरित की जाती हैं? +### What are query fee rebates and when are they distributed? Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### क्वेरी फी कट और इंडेक्सिंग रिवॉर्ड कट क्या है? +### What is query fee cut and indexing reward cut? -`queryFeeCut` और `indexingRewardCut` मान डेलिगेशन पैरामीटर हैं जिन्हें इंडेक्सर और उनके डेलीगेटर्स के बीच GRT के वितरण को नियंत्रित करने के लिए इंडेक्सर cooldownBlocks के साथ सेट कर सकता है। प्रतिनिधिमंडल पैरामीटर सेट करने के निर्देशों के लिए [प्रोटोकॉल में स्टेकिंग](/indexing/overview/#stake-in-the-protocol) में अंतिम चरण देखें। +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. - **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. 
- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### इंडेक्सर्स कैसे जानते हैं कि इंडेक्स करने के लिए कौन से सबग्राफ हैं? +### How do Indexers know which subgraphs to index? -इंडेक्सर्स सबग्राफ इंडेक्सिंग निर्णय लेने के लिए उन्नत तकनीकों को लागू करके खुद को अलग कर सकते हैं लेकिन एक सामान्य विचार देने के लिए हम नेटवर्क में सबग्राफ का मूल्यांकन करने के लिए उपयोग की जाने वाली कई प्रमुख मीट्रिक पर चर्चा करेंगे: +Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: -- **क्यूरेशन सिग्नल** - किसी विशेष सबग्राफ पर लागू नेटवर्क क्यूरेशन सिग्नल का अनुपात उस सबग्राफ में रुचि का एक अच्छा संकेतक है, विशेष रूप से बूटस्ट्रैप चरण के दौरान जब क्वेरी वॉल्यूमिंग बढ़ रही है. +- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. -- **एकत्रित क्वेरी शुल्क** - किसी विशिष्ट सबग्राफ के लिए एकत्र किए गए क्वेरी शुल्क की मात्रा का ऐतिहासिक डेटा भविष्य की मांग का एक अच्छा संकेतक है। +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. -- **दांव की गई राशि** - अन्य इंडेक्सर्स के व्यवहार की निगरानी करना या विशिष्ट सबग्राफ के लिए आवंटित कुल हिस्सेदारी के अनुपात को देखने से एक इंडेक्सर को सबग्राफ की पहचान करने के लिए सबग्राफ प्रश्नों के लिए आपूर्ति पक्ष की निगरानी करने की अनुमति मिल सकती है नेटवर्क विश्वास दिखा रहा है या सबग्राफ जो अधिक आपूर्ति की आवश्यकता दिखा सकता है। +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **बिना इंडेक्सिंग रिवार्ड वाले सबग्राफ** - कुछ सबग्राफ इंडेक्सिंग रिवार्ड्स उत्पन्न नहीं करते हैं, क्योंकि वे मुख्य रूप से IPFS जैसी असमर्थित सुविधाओं का उपयोग कर रहे हैं या क्योंकि वे मेननेट के बाहर किसी अन्य नेटवर्क से पूछताछ कर रहे हैं। यदि यह इंडेक्सिंग पुरस्कार उत्पन्न नहीं कर रहा है तो आपको सबग्राफ पर एक संदेश दिखाई देगा। +- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. -### हार्डवेयर आवश्यकताएँ क्या हैं? +### What are the hardware requirements? -- **छोटा** - कई सबग्राफ का अनुक्रमण शुरू करने के लिए पर्याप्त है, संभवतः इसे विस्तारित करने की आवश्यकता होगी। -- **मानक** - डिफ़ॉल्ट सेटअप, उदाहरण k8s/terraform परिनियोजन मेनिफेस्ट में इसका उपयोग किया जाता है। -- **माध्यम** - प्रोडक्शन इंडेक्सर प्रति सेकंड 100 सबग्राफ और 200-500 अनुरोधों का समर्थन करता है। -- **बड़ा** - वर्तमान में उपयोग किए जाने वाले सभी सबग्राफ को अनुक्रमित करने और संबंधित ट्रैफ़िक के अनुरोधों को पूरा करने के लिए तैयार है। +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. 
+- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| Setup | Postgres
    (CPUs) | Postgres
    (memory in GBs) | Postgres
    (disk in TBs) | VMs
    (CPUs) | VMs
    (memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Setup | Postgres
    (CPUs) | Postgres
    (memory in GBs) | Postgres
    (disk in TBs) | VMs
    (CPUs) | VMs
    (memory in GBs) | +| -------- | :------------------: | :---------------------------: | :-------------------------: | :-------------: | :----------------------: | +| Small | 4 | 8 | 1 | 4 | 16 | +| Standard | 8 | 30 | 1 | 12 | 48 | +| Medium | 16 | 64 | 2 | 32 | 64 | +| Large | 72 | 468 | 3.5 | 48 | 184 | -### एक इंडेक्सर को कौन सी बुनियादी सुरक्षा सावधानियां बरतनी चाहिए? +### What are some basic security precautions an Indexer should take? -- **ऑपरेटर वॉलेट** - ऑपरेटर वॉलेट सेट करना एक महत्वपूर्ण एहतियात है क्योंकि यह एक इंडेक्सर को उनकी चाबियों के बीच अलगाव बनाए रखने की अनुमति देता है जो हिस्सेदारी को नियंत्रित करती हैं और जो दिन-प्रतिदिन के संचालन को नियंत्रित करती हैं। निर्देशों के लिए [प्रोटोकॉल में हिस्सेदारी](/indexing/overview/#stake-in-the-protocol) देखें। +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. -- **फ़ायरवॉल** - केवल इंडेक्सर सेवा को सार्वजनिक रूप से प्रदर्शित करने की आवश्यकता है और एडमिन पोर्ट और डेटाबेस एक्सेस को लॉक करने पर विशेष ध्यान दिया जाना चाहिए: ग्राफ़ नोड JSON-RPC समापन बिंदु (डिफ़ॉल्ट पोर्ट): 8030), इंडेक्सर मैनेजमेंट एपीआई एंडपॉइंट (डिफ़ॉल्ट पोर्ट: 18000), और पोस्टग्रेज डेटाबेस एंडपॉइंट (डिफ़ॉल्ट पोर्ट: 5432) को उजागर नहीं किया जाना चाहिए। +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## आधारभूत संरचना +## Infrastructure -एक इंडेक्सर के बुनियादी ढांचे के केंद्र में ग्राफ नोड है जो एथेरियम की निगरानी करता है, एक सबग्राफ परिभाषा के अनुसार डेटा निकालता है और लोड करता है और इसे [GraphQL API](/about/#how-the-graph-works) के रूप में कार्य करता है। ग्राफ़ नोड को डेटा सोर्सिंग के लिए एथेरियम ईवीएम नोड एंडपॉइंट्स और आईपीएफएस नोड से कनेक्ट करने की आवश्यकता है; इसके स्टोर के लिए एक PostgreSQL डेटाबेस; और इंडेक्सर घटक जो नेटवर्क के साथ अपनी बातचीत की सुविधा प्रदान करते हैं। +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. -- **PostgreSQL डेटाबेस** - ग्राफ नोड के लिए मुख्य स्टोर, यह वह जगह है जहां सबग्राफ डेटा संग्रहीत किया जाता है। इंडेक्सर सेवा और एजेंट राज्य चैनल डेटा, लागत मॉडल, अनुक्रमण नियम और आवंटन क्रियाओं को संग्रहीत करने के लिए डेटाबेस का भी उपयोग करते हैं। +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. 
-- **Ethereum endpoint** - एक समापन बिंदु जो एक Ethereum JSON-RPC API को उजागर करता है। यह एकल एथेरियम क्लाइंट का रूप ले सकता है या यह एक अधिक जटिल सेटअप हो सकता है जो कई में संतुलन को लोड करता है। यह जानना महत्वपूर्ण है कि कुछ उप-अनुच्छेदों के लिए विशेष एथेरियम क्लाइंट क्षमताओं की आवश्यकता होगी जैसे कि आर्काइव मोड और ट्रेसिंग एपीआई। +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. -- **IPFS नोड (5 से कम संस्करण)** - सबग्राफ परिनियोजन मेटाडेटा IPFS नेटवर्क पर संग्रहीत है। सबग्राफ मैनिफ़ेस्ट और सभी लिंक की गई फ़ाइलों को लाने के लिए सबग्राफ़ परिनियोजन के दौरान ग्राफ़ नोड मुख्य रूप से IPFS नोड तक पहुँचता है। नेटवर्क इंडेक्सर्स को अपने स्वयं के IPFS नोड को होस्ट करने की आवश्यकता नहीं है, नेटवर्क के लिए एक IPFS नोड https://ipfs.network.thegraph.com पर होस्ट किया गया है। +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. -- **इंडेक्सर सेवा** - नेटवर्क के साथ सभी आवश्यक बाहरी संचार को संभालती है। शेयर लागत मॉडल और अनुक्रमण स्थिति, गेटवे से ग्राफ़ नोड पर क्वेरी अनुरोधों को पास करता है, और गेटवे के साथ राज्य चैनलों के माध्यम से क्वेरी भुगतान का प्रबंधन करता है। +- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. -- **इंडेक्सर एजेंट** - नेटवर्क पर पंजीकरण, इसके ग्राफ नोड/एस पर सबग्राफ परिनियोजन प्रबंधित करने और आवंटन प्रबंधित करने सहित श्रृंखला पर इंडेक्सर्स इंटरैक्शन की सुविधा प्रदान करता है। +- **Indexer agent** - Facilitates the Indexers interactions onchain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. -- **प्रोमेथियस मेट्रिक्स सर्वर** - ग्राफ नोड और इंडेक्सर घटक अपने मेट्रिक्स को मेट्रिक्स सर्वर पर लॉग करते हैं। +- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -नोट: फुर्तीली स्केलिंग का समर्थन करने के लिए, यह अनुशंसा की जाती है कि क्वेरी और इंडेक्सिंग चिंताओं को नोड्स के विभिन्न सेटों के बीच अलग किया जाए: क्वेरी नोड्स और इंडेक्स नोड्स। +Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. ### Ports overview -> **महत्वपूर्ण**: बंदरगाहों को सार्वजनिक रूप से उजागर करने के बारे में सावधान रहें - **प्रशासन बंदरगाहों** को बंद रखा जाना चाहिए। इसमें ग्राफ़ नोड JSON-RPC और अनुक्रमणिका प्रबंधन समापन बिंदु शामिल हैं जिनका विवरण नीचे दिया गया है। +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. -#### Graph Node +#### ग्राफ-नोड -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (सबग्राफ प्रश्नों के लिए) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (सबग्राफ सब्सक्रिप्शन के लिए) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (तैनाती के प्रबंधन के लिए) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Service -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
    (सबग्राफ प्रश्नों के लिए) | /subgraphs/id/...
    /status
    /channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------------------------------------- | ----------------------------------------------------------- | --------------- | ---------------------- | +| 7600 | GraphQL HTTP server
    (for paid subgraph queries) | /subgraphs/id/...
    /status
    /channel-messages-inbox | \--port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | \--metrics-port | - | #### Indexer Agent -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ---------------------- | ------ | -------------------------- | --------------------------------------- | +| 8000 | Indexer management API | / | \--indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Google क्लाउड पर टेराफॉर्म का उपयोग करके सर्वर इंफ्रास्ट्रक्चर सेटअप करें +### Setup server infrastructure using Terraform on Google Cloud -> नोट: इंडेक्सर वैकल्पिक रूप से AWS, Microsoft Azure, या अलीबाबा का उपयोग कर सकते हैं। +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. #### Install prerequisites @@ -178,7 +178,7 @@ Once an allocation has been closed the rebates are available to be claimed by th - Kubectl command line tool - Terraform -#### एक Google क्लाउड प्रोजेक्ट बनाएं +#### Create a Google Cloud Project - Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). @@ -188,7 +188,7 @@ Once an allocation has been closed the rebates are available to be claimed by th cd terraform ``` -- Google क्लाउड से प्रमाणित करें और एक नया प्रोजेक्ट बनाएं। +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -196,7 +196,7 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- नए प्रोजेक्ट के लिए बिलिंग सक्षम करने के लिए Google क्लाउड कंसोल के बिलिंग पृष्ठ का उपयोग करें। +- Use the Google Cloud Console's billing page to enable billing for the new project. - Create a Google Cloud configuration. @@ -208,7 +208,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- आवश्यक Google क्लाउड एपीआई सक्षम करें। +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -217,7 +217,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- एक सेवा खाता बनाएँ। +- Create a service account. ```sh svc_name= @@ -235,7 +235,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- डेटाबेस और Kubernetes क्लस्टर के बीच पियरिंग सक्षम करें जो अगले चरण में बनाया जाएगा। +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -249,22 +249,22 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- न्यूनतम टेराफ़ॉर्म कॉन्फ़िगरेशन फ़ाइल बनाएँ (आवश्यकतानुसार अद्यतन करें)। +- Create minimal terraform configuration file (update as needed). ```sh indexer= cat > terraform.tfvars < ताकि यह k8s/base` निर्देशिका को इंगित करे। +- Copy the directory `k8s/overlays` to a new directory `$dir,` and adjust the `bases` entry in `$dir/kustomization.yaml` so that it points to the directory `k8s/base`. -- `$dir` में सभी फाइलों को पढ़ें और टिप्पणियों में बताए अनुसार किसी भी मान को समायोजित करें। +- Read through all the files in `$dir` and adjust any values as indicated in the comments. -`kubectl apply -k $dir` के साथ सभी संसाधन परिनियोजित करें। +Deploy all resources with `kubectl apply -k $dir`. 
-### Graph Node +### ग्राफ-नोड [Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the blockchain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. -#### स्रोत से प्रारंभ करना +#### Getting started from source #### Install prerequisites @@ -307,7 +307,7 @@ kubectl config use-context $(kubectl config get-contexts --output='name' - **IPFS** -- **उबंटू उपयोगकर्ताओं के लिए अतिरिक्त आवश्यकताएं** - उबंटू पर ग्राफ नोड चलाने के लिए कुछ अतिरिक्त पैकेजों की आवश्यकता हो सकती है। +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config @@ -315,7 +315,7 @@ sudo apt-get install -y clang libpg-dev libssl-dev pkg-config #### Setup -1. एक PostgreSQL डेटाबेस सर्वर प्रारंभ करें +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -323,9 +323,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. [ग्राफ़ नोड](https://github.com/graphprotocol/graph-node) रेपो क्लोन करें और `cargo build` चलाकर स्रोत बनाएं +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. अब जब सभी निर्भरताएँ स्थापित हो गई हैं, तो ग्राफ़ नोड प्रारंभ करें: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -334,28 +334,28 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -#### डॉकर का उपयोग शुरू करना +#### Getting started using Docker -#### आवश्यक शर्तें +#### Prerequisites -- **एथेरियम नोड** - डिफ़ॉल्ट रूप से, डॉकर कंपोज़ सेटअप मेननेट का उपयोग करेगा: [http://host.docker.internal:8545](http://host.docker.internal:8545) आपके होस्ट मशीन पर एथेरियम नोड से कनेक्ट करने के लिए। `docker-compose.yaml` को अपडेट करके आप इस नेटवर्क नाम और url को बदल सकते हैं। +- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`. #### Setup -1. क्लोन ग्राफ़ नोड और डॉकर निर्देशिका पर नेविगेट करें: +1. Clone Graph Node and navigate to the Docker directory: ```sh git clone https://github.com/graphprotocol/graph-node cd graph-node/docker ``` -2. केवल Linux उपयोगकर्ताओं के लिए - शामिल स्क्रिप्ट का उपयोग करके `host.docker.internal` के बजाय `docker-compose.yaml` में होस्ट IP पते का उपयोग करें: +2. For linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yaml `using the included script: ```sh ./setup.sh ``` -3. एक स्थानीय ग्राफ़ नोड प्रारंभ करें जो आपके एथेरियम समापन बिंदु से जुड़ जाएगा: +3. 
Start a local Graph Node that will connect to your Ethereum endpoint: ```sh docker-compose up @@ -363,15 +363,15 @@ docker-compose up ### Indexer components -नेटवर्क में सफलतापूर्वक भाग लेने के लिए लगभग निरंतर निगरानी और सहभागिता की आवश्यकता होती है, इसलिए हमने इंडेक्सर्स नेटवर्क भागीदारी को सुविधाजनक बनाने के लिए टाइपस्क्रिप्ट एप्लिकेशन का एक सूट बनाया है। तीन इंडेक्सर घटक हैं: +To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: -- **इंडेक्सर एजेंट** - एजेंट नेटवर्क और इंडेक्सर के अपने इंफ्रास्ट्रक्चर की निगरानी करता है और प्रबंधित करता है कि कौन से सबग्राफ परिनियोजन अनुक्रमित हैं और चेन पर आवंटित किए गए हैं और प्रत्येक के लिए कितना आवंटित किया गया है। +- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards onchain and how much is allocated towards each. -- **इंडेक्सर सेवा** - एकमात्र घटक जिसे बाहरी रूप से प्रदर्शित करने की आवश्यकता होती है, सेवा ग्राफ नोड को सबग्राफ क्वेरी भेजती है, क्वेरी भुगतान के लिए राज्य चैनल प्रबंधित करती है, महत्वपूर्ण निर्णय लेने की जानकारी साझा करती है गेटवे जैसे ग्राहकों के लिए। +- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. -- **इंडेक्सर सीएलआई** - इंडेक्सर एजेंट को प्रबंधित करने के लिए कमांड लाइन इंटरफ़ेस। यह इंडेक्सर्स को लागत मॉडल, मैन्युअल आवंटन, एक्शन क्यू और इंडेक्सिंग नियमों को प्रबंधित करने की अनुमति देता है। +- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. -#### शुरू करना +#### Getting started The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/indexing/overview/#stake-in-the-protocol) before starting up your Indexer components! @@ -420,14 +420,14 @@ cd packages/indexer-cli #### Using docker -- रजिस्ट्री से चित्र खींचे +- Pull images from the registry ```sh docker pull ghcr.io/graphprotocol/indexer-service:latest docker pull ghcr.io/graphprotocol/indexer-agent:latest ``` -या स्रोत से स्थानीय रूप से छवियां बनाएं +Or build images locally from source ```sh # Indexer service @@ -442,24 +442,24 @@ docker build \ -t indexer-agent:latest \ ``` -- घटकों को चलाएँ +- Run the components ```sh docker run -p 7600:7600 -it indexer-service:latest ... docker run -p 18000:8000 -it indexer-agent:latest ... 
``` -**ध्यान दें**: कंटेनर शुरू करने के बाद, अनुक्रमणिका सेवा [http://localhost:7600](http://localhost:7600) पर पहुंच योग्य होनी चाहिए और अनुक्रमणिका एजेंट को [http://localhost:18000/](http://localhost:18000/) पर अनुक्रमणिका प्रबंधन API को प्रदर्शित करना चाहिए। +**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). -#### K8s और टेराफॉर्म का उपयोग करना +#### Using K8s and Terraform -[Google क्लाउड पर Terraform का उपयोग करके सेटअप सर्वर इन्फ्रास्ट्रक्चर](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) अनुभाग देखें +See the [Setup Server Infrastructure Using Terraform on Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) section -#### प्रयोग +#### Usage -> **ध्यान दें**: सभी रनटाइम कॉन्फ़िगरेशन चर या तो स्टार्टअप पर कमांड के पैरामीटर के रूप में या `COMPONENT_NAME_VARIABLE_NAME`(उदा. `INDEXER_AGENT_ETHEREUM`) प्रारूप के पर्यावरण चर का उपयोग करके लागू किए जा सकते हैं). +> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). -#### इंडेक्सर एजेंट +#### Indexer agent ```sh graph-indexer-agent start \ @@ -488,7 +488,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Indexer Service +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -516,22 +516,22 @@ graph-indexer-service start \ #### Indexer CLI -इंडेक्सर सीएलआई [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) के लिए एक प्लगइन है, जिसे `ग्राफ इंडेक्सर` पर टर्मिनल में एक्सेस किया जा सकता है। +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### इंडेक्सर सीएलआई का उपयोग कर इंडेक्सर प्रबंधन +#### Indexer management using Indexer CLI -**इंडेक्सर मैनेजमेंट एपीआई** के साथ इंटरैक्ट करने के लिए सुझाया गया टूल **इंडेक्सर सीएलआई** है, जो **ग्राफ़ सीएलआई** का विस्तार है। इंडेक्सर की ओर से नेटवर्क के साथ स्वायत्तता से इंटरैक्ट करने के लिए इंडेक्सर एजेंट को इंडेक्सर से इनपुट की आवश्यकता होती है। **आवंटन प्रबंधन** मोड और **अनुक्रमण नियम** इंडेक्सर एजेंट के व्यवहार को परिभाषित करने के तंत्र हैं। ऑटो मोड के तहत, एक इंडेक्सर **इंडेक्सिंग नियमों** का उपयोग इंडेक्स के लिए सबग्राफ चुनने और प्रश्नों को प्रस्तुत करने के लिए अपनी विशिष्ट रणनीति को लागू करने के लिए कर सकता है। नियमों को एजेंट द्वारा प्रदत्त ग्राफ़क्यूएल एपीआई के माध्यम से प्रबंधित किया जाता है और इसे इंडेक्सर मैनेजमेंट एपीआई के रूप में जाना जाता है। मैनुअल मोड के तहत, एक इंडेक्सर **कार्रवाई कतार** का उपयोग करके आवंटन क्रियाएं बना सकता है और उन्हें निष्पादित करने से पहले स्पष्ट रूप से अनुमोदित कर सकता है। निरीक्षण मोड के तहत, **अनुक्रमण नियम** का उपयोग **कार्रवाई कतार** को भरने के लिए किया जाता है और निष्पादन के लिए स्पष्ट स्वीकृति की भी आवश्यकता होती है। +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. 
Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### प्रयोग +#### Usage -**इंडेक्सर सीएलआई** आमतौर पर पोर्ट-फ़ॉरवर्डिंग के माध्यम से इंडेक्सर एजेंट से जुड़ता है, इसलिए सीएलआई को उसी सर्वर या क्लस्टर पर चलाने की आवश्यकता नहीं होती है। आरंभ करने में आपकी मदद करने के लिए, और कुछ संदर्भ प्रदान करने के लिए, सीएलआई का संक्षेप में वर्णन यहाँ किया जाएगा। +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. -- `ग्राफ़ इंडेक्सर कनेक्ट ` - इंडेक्सर मैनेजमेंट एपीआई से कनेक्ट करें। आमतौर पर सर्वर से कनेक्शन पोर्ट फ़ॉरवर्डिंग के माध्यम से खोला जाता है, इसलिए सीएलआई को दूर से आसानी से संचालित किया जा सकता है। (उदाहरण: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) - `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. @@ -539,33 +539,33 @@ graph indexer status - `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. -- `ग्राफ़ इंडेक्सर नियम स्टॉप [विकल्प] <परिनियोजन-आईडी>` - किसी परिनियोजन को अनुक्रमित करना बंद करें और इसके `decisionBasis` को कभी भी सेट न करें, इसलिए यह परिनियोजन पर निर्णय लेते समय इस परिनियोजन को छोड़ देगा अनुक्रमणिका। +- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. -- `ग्राफ़ अनुक्रमणिका नियम हो सकता है [विकल्प] ` — परिनियोजन के लिए `निर्णय आधार` को `नियमों` पर सेट करें, ताकि अनुक्रमणिका एजेंट अनुक्रमण नियमों का उपयोग करेगा यह तय करने के लिए कि इस परिनियोजन को अनुक्रमित करना है या नहीं। +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. -- `ग्राफ़ Indexerक्रियाओं को [विकल्प] <कार्रवाई-आईडी>` मिलता है - `सभी` का उपयोग करके एक या अधिक क्रियाएं प्राप्त करें या प्राप्त करने के लिए `action-id` खाली छोड़ दें सभी क्रियाएं। एक अतिरिक्त तर्क `--status` का उपयोग किसी निश्चित स्थिति के सभी कार्यों को प्रिंट करने के लिए किया जा सकता है। +- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. 
-- `ग्राफ इंडेक्सर एक्शन कतार आवंटन ` - कतार आवंटन कार्रवाई +- `graph indexer action queue allocate ` - Queue allocation action - `graph indexer action queue reallocate ` - Queue reallocate action -- `ग्राफ़ अनुक्रमणिका कार्रवाई कतार अनावंटित करें <परिनियोजन-आईडी> <आवंटन-आईडी>` - क्यू अनलोकेट एक्शन +- `graph indexer action queue unallocate ` - Queue unallocate action -- `ग्राफ इंडेक्सर क्रियाएं रद्द [ ...]` - यदि आईडी निर्दिष्ट नहीं है तो कतार में सभी क्रियाएं रद्द करें, अन्यथा विभाजक के रूप में स्थान के साथ आईडी की सरणी रद्द करें +- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator -- `ग्राफ़ अनुक्रमणिका क्रियाएँ स्वीकृत [<कार्रवाई-आईडी> ...]` - निष्पादन के लिए कई कार्यों को स्वीकृति दें +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution -- `ग्राफ़ अनुक्रमणिका क्रियाएँ स्वीकृत निष्पादित करती हैं` - कार्यकर्ता को स्वीकृत क्रियाओं को तुरंत निष्पादित करने के लिए बाध्य करें +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -सभी आदेश जो आउटपुट में नियम प्रदर्शित करते हैं, समर्थित आउटपुट स्वरूपों (`तालिका`, `yaml`, और `json`) के बीच `का उपयोग करके चुन सकते हैं - आउटपुट` तर्क। +All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. #### Indexing rules -इंडेक्सिंग नियम या तो वैश्विक डिफ़ॉल्ट के रूप में लागू किए जा सकते हैं या विशिष्ट सबग्राफ परिनियोजन के लिए उनकी आईडी का उपयोग कर सकते हैं। `परिनियोजन` और `निर्णय के आधार` फ़ील्ड अनिवार्य हैं, जबकि अन्य सभी फ़ील्ड वैकल्पिक हैं। जब एक इंडेक्सिंग नियम में `नियम` `निर्णय के आधार` के रूप में होते हैं, तो इंडेक्सर एजेंट उस नियम पर गैर-शून्य थ्रेशोल्ड मानों की तुलना संबंधित परिनियोजन के लिए नेटवर्क से प्राप्त मानों से करेगा। यदि सबग्राफ परिनियोजन में किसी भी सीमा के ऊपर (या नीचे) मान हैं, तो इसे अनुक्रमण के लिए चुना जाएगा। +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -उदाहरण के लिए, यदि वैश्विक नियम में **5** (GRT) का `minStake` है, तो कोई भी सबग्राफ परिनियोजन जिसमें 5 (GRT) से अधिक हिस्सेदारी हो इसे आवंटित अनुक्रमित किया जाएगा। थ्रेशोल्ड नियमों में `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, और `minAverageQueryFees` शामिल हैं। +For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. 
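To make the threshold behavior concrete, here is a small illustrative TypeScript sketch of the comparison described above. It is not the Indexer agent's actual implementation; the field names mirror the data model below, while the stats shape and `GRT` constant are assumptions made for the example.

```typescript
// Illustrative sketch only; not the Indexer agent's real evaluation code.
// A deployment qualifies if any non-null threshold on the rule is met by
// the values fetched from the network for that deployment.
interface IndexingRuleThresholds {
  minStake?: bigint | null; // thresholds in wei (18 decimals)
  minSignal?: bigint | null;
  maxSignal?: bigint | null;
  minAverageQueryFees?: bigint | null;
}

interface DeploymentNetworkStats {
  stakedTokens: bigint;
  signalledTokens: bigint;
  averageQueryFees: bigint;
}

function shouldIndex(rule: IndexingRuleThresholds, stats: DeploymentNetworkStats): boolean {
  if (rule.minStake != null && stats.stakedTokens > rule.minStake) return true;
  if (rule.minSignal != null && stats.signalledTokens > rule.minSignal) return true;
  if (rule.maxSignal != null && stats.signalledTokens < rule.maxSignal) return true;
  if (rule.minAverageQueryFees != null && stats.averageQueryFees > rule.minAverageQueryFees) return true;
  return false;
}

// With a global rule of minStake = 5 GRT, a deployment with more than
// 5 GRT of allocated stake is chosen for indexing:
const GRT = 10n ** 18n;
const globalRule = { minStake: 5n * GRT };
const stats = { stakedTokens: 6n * GRT, signalledTokens: 0n, averageQueryFees: 0n };
console.log(shouldIndex(globalRule, stats)); // true
```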
-डेटा मॉडल: +Data model: ```graphql type IndexingRule { @@ -599,7 +599,7 @@ IndexingDecisionBasis { } ``` -अनुक्रमण नियम का उदाहरण उपयोग: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -613,18 +613,18 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK #### Actions queue CLI -इंडेक्सर-क्ली एक्शन क्यू के साथ मैन्युअल रूप से काम करने के लिए `कार्रवाई` मॉड्यूल प्रदान करता है। यह क्रिया क्यू के साथ इंटरैक्ट करने के लिए अनुक्रमणिका प्रबंधन सर्वर द्वारा होस्ट किए गए **Graphql API** का उपयोग करता है। +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. -क्रिया निष्पादन कार्यकर्ता कतार से केवल आइटमों को निष्पादित करने के लिए पकड़ेगा यदि उनके पास `ActionStatus = स्वीकृत` है। अनुशंसित पथ में कार्रवाई कतार में ActionStatus = कतारबद्ध के साथ जोड़ी जाती है, इसलिए उन्हें ऑन-चेन निष्पादित करने के लिए अनुमोदित किया जाना चाहिए। सामान्य प्रवाह इस तरह दिखेगा: +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed onchain. The general flow will look like: -- तृतीय पक्ष ऑप्टिमाइज़र टूल या इंडेक्सर-क्ली उपयोगकर्ता द्वारा कतार में जोड़ी गई कार्रवाई -- सभी कतारबद्ध क्रियाओं को देखने के लिए इंडेक्सर `indexer-cli` का उपयोग कर सकता है -- इंडेक्सर (या अन्य सॉफ़्टवेयर) `इंडेक्सर-क्ली` का उपयोग करके कतार में क्रियाओं को स्वीकृत या रद्द कर सकता है। स्वीकृति और रद्द करने के आदेश इनपुट के रूप में क्रिया आईडी की एक सरणी लेते हैं। -- निष्पादन कार्यकर्ता नियमित रूप से स्वीकृत कार्यों के लिए कतार का चुनाव करता है। यह कतार से `अनुमोदित` कार्यों को पकड़ लेगा, उन्हें निष्पादित करने का प्रयास करेगा, और निष्पादन की स्थिति के आधार पर डीबी में मूल्यों को `सफलता` या `विफल< पर अपडेट करेगा। /0>.
  • If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in auto` or `oversight` mode. -- अनुक्रमणक क्रिया निष्पादन के इतिहास को देखने के लिए क्रिया कतार की निगरानी कर सकता है और यदि आवश्यक हो तो निष्पादन विफल होने पर क्रिया आइटम को पुन: अनुमोदित और अद्यतन कर सकता है। क्रिया कतार पंक्तिबद्ध और की गई सभी कार्रवाइयों का इतिहास प्रदान करती है। +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. -डेटा मॉडल: +Data model: ```graphql Type ActionInput { @@ -657,7 +657,7 @@ ActionType { } ``` -स्रोत से उदाहरण उपयोग: +Example usage from source: ```bash graph indexer actions get all @@ -677,44 +677,44 @@ graph indexer actions approve 1 3 5 graph indexer actions execute approve ``` -ध्यान दें कि आवंटन प्रबंधन के लिए समर्थित क्रिया प्रकारों की अलग-अलग इनपुट आवश्यकताएं होती हैं: +Note that supported action types for allocation management have different input requirements: -- `आवंटित करें` - एक विशिष्ट सबग्राफ परिनियोजन के लिए हिस्सेदारी आवंटित करें +- `Allocate` - allocate stake to a specific subgraph deployment - - आवश्यक क्रिया पैरामीटर: + - required action params: - deploymentID - amount -- `Unallocate` - करीब आवंटन, हिस्सेदारी को कहीं और फिर से आवंटित करने के लिए मुक्त करना +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - आवश्यक क्रिया पैरामीटर: + - required action params: - allocationID - deploymentID - - वैकल्पिक क्रिया पैरामीटर: + - optional action params: - poi - - बल (प्रदान किए गए POI का उपयोग करने पर बल देता है, भले ही वह ग्राफ़-नोड द्वारा प्रदान किए गए मेल से मेल न खाता हो) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `Reallocate` - परमाणु रूप से आवंटन बंद करें और समान सबग्राफ परिनियोजन के लिए एक नया आवंटन खोलें +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - आवश्यक क्रिया पैरामीटर: + - required action params: - allocationID - deploymentID - amount - - वैकल्पिक क्रिया पैरामीटर: + - optional action params: - poi - - बल (प्रदान किए गए POI का उपयोग करने पर बल देता है, भले ही वह ग्राफ़-नोड द्वारा प्रदान किए गए मेल से मेल न खाता हो) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) #### Cost models -लागत मॉडल बाजार और क्वेरी विशेषताओं के आधार पर प्रश्नों के लिए गतिशील मूल्य निर्धारण प्रदान करते हैं। अनुक्रमणिका सेवा प्रत्येक सबग्राफ के लिए गेटवे के साथ एक लागत मॉडल साझा करती है जिसके लिए वे 
प्रश्नों का उत्तर देना चाहते हैं। गेटवे, बदले में, प्रति प्रश्न अनुक्रमणिका चयन निर्णय लेने और चुने गए अनुक्रमणकों के साथ भुगतान पर बातचीत करने के लिए लागत मॉडल का उपयोग करते हैं। +Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -अगोरा भाषा प्रश्नों के लिए लागत मॉडल घोषित करने के लिए एक लचीला प्रारूप प्रदान करती है। एक एगोरा मूल्य मॉडल बयानों का एक क्रम है जो एक ग्राफक्यूएल क्वेरी में प्रत्येक शीर्ष-स्तरीय क्वेरी के क्रम में निष्पादित होता है। प्रत्येक शीर्ष-स्तरीय क्वेरी के लिए, पहला कथन जो उससे मेल खाता है, उस क्वेरी के लिए मूल्य निर्धारित करता है। +The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. -एक कथन में एक विधेय शामिल होता है, जिसका उपयोग ग्राफक्यूएल प्रश्नों के मिलान के लिए किया जाता है, और एक लागत अभिव्यक्ति होती है, जिसका मूल्यांकन दशमलव GRT में लागत को आउटपुट करता है। क्वेरी के नामित तर्क स्थिति में मान विधेय में कैप्चर किए जा सकते हैं और अभिव्यक्ति में उपयोग किए जा सकते हैं। एक अभिव्यक्ति में प्लेसहोल्डर्स के लिए ग्लोबल्स भी सेट और प्रतिस्थापित किए जा सकते हैं। +A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. -उदाहरण लागत मॉडल: +Example cost model: ``` # This statement captures the skip value, @@ -727,64 +727,64 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -उपरोक्त मॉडल का उपयोग करके उदाहरण क्वेरी लागत: +Example query costing using the above model: -| जिज्ञासा | कीमत | +| Query | Price | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | -#### लागत मॉडल लागू करना +#### Applying the cost model -लागत मॉडल इंडेक्सर सीएलआई के माध्यम से लागू किए जाते हैं, जो उन्हें डेटाबेस में स्टोर करने के लिए इंडेक्सर एजेंट के इंडेक्सर मैनेजमेंट एपीआई को भेजता है। इंडेक्सर सर्विस तब उन्हें उठाएगी और जब भी वे उनके लिए मांगेंगे, गेटवे को लागत मॉडल की सेवा देगी। +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## नेटवर्क के साथ बातचीत करना +## Interacting with the network -### प्रोटोकॉल में हिस्सेदारी +### Stake in the protocol The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. 
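The two Remix walkthroughs that follow boil down to two contract calls: an ERC-20 `approve(spender, amount)` on the GraphToken contract, then `stake()` on the Staking contract. For operators who prefer scripting over a browser, a minimal sketch with Foundry's `cast` is shown below. Foundry is an assumption (it is not mentioned in this patch), as are the RPC and key variables and the example amount; the addresses and function signatures are the ones quoted in the steps below.

```bash
# Sketch only: approve, then stake, mirroring step 6 of each Remix walkthrough below.
# The amount is a made-up example of 100,000 GRT expressed in wei (18 decimals).
AMOUNT=100000000000000000000000

# Approve the Staking contract to pull the GRT that will be staked
cast send 0xc944E90C64B2c07662A292be6244BDf05Cda44a7 \
  "approve(address,uint256)" \
  0xF55041E37E12cD407ad00CE2910B8269B01263b9 "$AMOUNT" \
  --rpc-url "$ETH_RPC_URL" --private-key "$INDEXER_KEY"

# Stake the approved amount in the protocol
cast send 0xF55041E37E12cD407ad00CE2910B8269B01263b9 \
  "stake(uint256)" "$AMOUNT" \
  --rpc-url "$ETH_RPC_URL" --private-key "$INDEXER_KEY"
```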
> Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools).

-एक बार एक इंडेक्सर प्रोटोकॉल में जीआरटी को दांव पर लगा देता है, तो [इंडेक्सर घटक](/indexing/overview/#indexer-components) को शुरू किया जा सकता है और नेटवर्क के साथ उनकी बातचीत शुरू की जा सकती है।
+Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network.

-#### टोकन स्वीकृत करें
+#### Approve tokens

-1. ब्राउज़र में [रीमिक्स ऐप](https://remix.ethereum.org/) खोलें
+1. Open the [Remix app](https://remix.ethereum.org/) in a browser

-2. `फाइल एक्सप्लोरर` में [टोकन एबीआई](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json) के साथ **GraphToken.abi** नाम की एक फाइल बनाएं।
+2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json).

3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface.

4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

-5. ग्राफटोकन अनुबंध पता सेट करें - `पते पर` के बगल में ग्राफटोकन अनुबंध पता (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) पेस्ट करें और आवेदन करने के लिए `पते पर` बटन पर क्लिक करें।
+5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply.

-6. स्टेकिंग अनुबंध को स्वीकृत करने के लिए `स्वीकृति (खर्च, राशि)` फ़ंक्शन को कॉल करें। स्टेकिंग अनुबंध पते के साथ `खर्च` भरें (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) और `राशि` दांव लगाने के लिए टोकन के साथ (वी में)।
+6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei).

-#### स्टेक टोकन
+#### Stake tokens

-1. ब्राउज़र में [रीमिक्स ऐप](https://remix.ethereum.org/) खोलें
+1. Open the [Remix app](https://remix.ethereum.org/) in a browser

-2. `फाइल एक्सप्लोरर` में स्टेकिंग एबीआई के साथ **Stakeing.abi** नाम की एक फाइल बनाएं।
+2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI.

3. With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface.

4. Under environment select `Injected Web3` and under `Account` select your Indexer address.

-5. स्टेकिंग अनुबंध पता सेट करें - `पते पर` के आगे स्टेकिंग अनुबंध पता (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) पेस्ट करें और आवेदन करने के लिए `पते पर` बटन पर क्लिक करें।
+5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply.

-6. प्रोटोकॉल में GRT को दांव पर लगाने के लिए `दांव()` पर कॉल करें।
+6. Call `stake()` to stake GRT in the protocol.

-7. (वैकल्पिक) इंडेक्सर्स अपने इंडेक्सर इंफ्रास्ट्रक्चर के लिए ऑपरेटर होने के लिए एक और पते को मंजूरी दे सकते हैं ताकि उन कुंजियों को अलग किया जा सके जो उपग्राफों पर आवंटन और सेवा (भुगतान) प्रश्नों को दिन-प्रतिदिन क्रियाएं कर रहे हैं। ऑपरेटर को सेट करने के लिए `setOperator()` को ऑपरेटर के पते के साथ कॉल करें।
+7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address.

-8. (वैकल्पिक) पुरस्कारों के वितरण को नियंत्रित करने और प्रतिनिधियों को रणनीतिक रूप से आकर्षित करने के लिए इंडेक्सर्स अपने इंडेक्सिंग रिवार्डकट (पार्ट्स पर मिलियन), queryFeeCut (पार्ट्स प्रति मिलियन), और कूलडाउनब्लॉक्स (ब्लॉक्स की संख्या) को अपडेट करके अपने डेलिगेशन पैरामीटर्स को अपडेट कर सकते हैं। ऐसा करने के लिए `setDelegationParameters()` को कॉल करें। निम्न उदाहरण queryFeeCut को अनुक्रमणिका को 95% क्वेरी छूट और डेलिगेटर को 5% वितरित करने के लिए सेट करता है, indexingRewardCutto को अनुक्रमणिका पुरस्कार का 60% और प्रतिनिधि को 40% वितरित करने के लिए सेट करता है, और `thecooldownBlocks` सेट करता है 500 ब्लॉक की अवधि।
+8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks.

```
setDelegationParameters(950000, 600000, 500)
```

@@ -806,12 +806,12 @@ To set the delegation parameters using Graph Explorer interface, follow these st

> Note: This transaction will need to be confirmed by the multisig wallet signers.

-### आवंटन का जीवन
+### The life of an allocation

After being created by an Indexer a healthy allocation goes through two states.

-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.
+- **Active** - Once an allocation is created onchain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules.

- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)).

-इंडेक्सर्स को ऑन-चेन आवंटन बनाने से पहले चेनहेड में सबग्राफ परिनियोजन को सिंक करने के लिए ऑफ-चेन सिंकिंग कार्यक्षमता का उपयोग करने की सिफारिश की जाती है। यह सुविधा सबग्राफ के लिए विशेष रूप से उपयोगी है जो सिंक करने के लिए 28 से अधिक समय ले सकती है या अनिश्चित रूप से विफल होने की कुछ संभावनाएं हैं।
+Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation onchain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically.

From 53e35d8c70a2a84b56d083ef5a6d9a374ab329a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:44:57 -0500
Subject: [PATCH 0090/1534] New translations tap.mdx (Romanian)

---
 website/src/pages/ro/indexing/tap.mdx | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/website/src/pages/ro/indexing/tap.mdx b/website/src/pages/ro/indexing/tap.mdx
index 4132216ca5b6..3bab672ab211 100644
--- a/website/src/pages/ro/indexing/tap.mdx
+++ b/website/src/pages/ro/indexing/tap.mdx
@@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T
[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features:

- Efficiently handles micropayments.
-- Adds a layer of consolidations to on-chain transactions and costs.
+- Adds a layer of consolidations to onchain transactions and costs.
- Allows Indexers control of receipts and payments, guaranteeing payment for queries.
- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders.
@@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed

### Gateway

-| Component  | Edge and Node Mainnet (Aribtrum Mainnet)      | Edge and Node Testnet (Arbitrum Sepolia)      |
+| Component  | Edge and Node Mainnet (Arbitrum Mainnet)      | Edge and Node Testnet (Arbitrum Sepolia)      |
| ---------- | --------------------------------------------- | --------------------------------------------- |
| Sender     | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467`  | `0xC3dDf37906724732FfD748057FEBe23379b0710D`  |
| Signers    | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211`  | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE`  |
| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` |
@@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed

In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`.

-- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
+- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1)

-> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually.
+> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. ## Migration Guide From a0552e971415270fb3ec4f4e6f1e107b0aa7081d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:58 -0500 Subject: [PATCH 0091/1534] New translations tap.mdx (French) --- website/src/pages/fr/indexing/tap.mdx | 192 +++++++++++++------------- 1 file changed, 96 insertions(+), 96 deletions(-) diff --git a/website/src/pages/fr/indexing/tap.mdx b/website/src/pages/fr/indexing/tap.mdx index bf5c6eb3988f..92fdd02e2082 100644 --- a/website/src/pages/fr/indexing/tap.mdx +++ b/website/src/pages/fr/indexing/tap.mdx @@ -1,193 +1,193 @@ --- -title: TAP Migration Guide +title: Guide de migration TAP --- -Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust. +Découvrez le nouveau système de paiement de The Graph, le **Timeline Aggregation Protocol, TAP**. Ce système permet des microtransactions rapides et efficaces avec une confiance minimale. ## Aperçu -[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: +[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) est un remplacement direct du système de paiement Scalar actuellement en place. Il offre les fonctionnalités clés suivantes : -- Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. -- Allows Indexers control of receipts and payments, guaranteeing payment for queries. -- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. +- Gère efficacement les micropaiements. +- Ajoute une couche de consolidations aux transactions et aux coûts onchain. +- Permet aux Indexeurs de contrôler les recettes et les paiements, garantissant ainsi le paiement des requêtes. +- Il permet des passerelles décentralisées, sans confiance, et améliore les performances du service d'indexation pour les expéditeurs multiples. ## Spécificités⁠ -TAP allows a sender to make multiple payments to a receiver, **TAP Receipts**, which aggregates these payments into a single payment, a **Receipt Aggregate Voucher**, also known as a **RAV**. This aggregated payment can then be verified on the blockchain, reducing the number of transactions and simplifying the payment process. +Le TAP permet à un expéditeur d'effectuer plusieurs paiements à un destinataire, **TAP Receipts**, qui regroupe ces paiements en un seul paiement, un **Receipt Aggregate Voucher**, également connu sous le nom de **RAV**. Ce paiement regroupé peut ensuite être vérifié sur la blockchain, ce qui réduit le nombre de transactions et simplifie le processus de paiement. -For each query, the gateway will send you a `signed receipt` that is stored on your database. Then, these queries will be aggregated by a `tap-agent` through a request. Afterwards, you’ll receive a RAV. You can update a RAV by sending it with newer receipts and this will generate a new RAV with an increased value. +Pour chaque requête, la passerelle vous enverra un `reçu signé` qui sera stocké dans votre base de données. Ensuite, ces requêtes seront agrégées par un `tap-agent` par le biais d'une demande. Vous recevrez ensuite un RAV. 
Vous pouvez mettre à jour un RAV en l'envoyant avec des reçus plus récents, ce qui générera un nouveau RAV avec une valeur plus élevée. -### RAV Details +### À propos des RAV -- It’s money that is waiting to be sent to the blockchain. +- C'est de l'argent qui attend d'être envoyé à la blockchain. -- It will continue to send requests to aggregate and ensure that the total value of non-aggregated receipts does not exceed the `amount willing to lose`. +- Il continuera à envoyer des demandes d'agrégation et veillera à ce que la valeur totale des recettes non agrégées ne dépasse pas le « montant que vous êtes prêt à risquer de perdre ». -- Each RAV can be redeemed once in the contracts, which is why they are sent after the allocation is closed. +- Chaque RAV ne peut être racheté qu'une seule fois dans les contrats, c'est pourquoi ils sont envoyés après la clôture de l'allocation. -### Redeeming RAV +### Rachat de RAV -As long as you run `tap-agent` and `indexer-agent`, everything will be executed automatically. The following provides a detailed breakdown of the process: +Tant que vous exécutez `tap-agent` et `indexer-agent`, tout sera exécuté automatiquement. Voici une description détaillée du processus : -1. An Indexer closes allocation. +1. Un Indexeur clôture l'allocation. -2. ` period, tap-agent` takes all pending receipts for that specific allocation and requests an aggregation into a RAV, marking it as `last`. +2. Pendant la période `, tap-agent` regroupe tous les reçus (receipts) en attente pour cette allocation particulière et demande leur agrégation dans un RAV, en le marquant comme « dernier » (last). -3. `indexer-agent` takes all the last RAVS and sends redeem requests to the blockchain, which will update the value of `redeem_at`. +3. `indexer-agent` récupère tous les derniers RAVS et envoie des demandes de rachat à la blockchain, qui mettra à jour la valeur de `redeem_at`. -4. During the `` period, `indexer-agent` monitors if the blockchain has any reorganizations that revert the transaction. +4. Pendant la période ``, `indexer-agent` surveille si la blockchain a des réorganisations qui annulent la transaction. - - If it was reverted, the RAV is resent to the blockchain. If it was not reverted, it gets marked as `final`. + - Si elle a été annulée, la RAV est renvoyée à la blockchain. S'il n'a pas été modifié, il est marqué comme `final`. 
-## Blockchain Addresses +## Adresses sur la blockchain -### Contracts +### Contrats -| Contract | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | -| ------------------- | -------------------------------------------- | -------------------------------------------- | -| TAP Verifier | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | -| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | -| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | +| Contrat | Mainnet Arbitrum (42161) | Arbitrum Sepolia (421614) | +| ---------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| TAP Verifier | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Tiers de confiance (Escrow) | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | -### Gateway +### Passerelle (Gateway) -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | -| ---------- | --------------------------------------------- | --------------------------------------------- | -| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| Composant | Mainnet Node et Edge (Arbitrum Mainnet) | Testnet Node et Edge (Arbitrum Mainnet) | +| -------------- | --------------------------------------------- | --------------------------------------------- | +| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Signataires | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Aggregateur | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | ### Exigences -In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. +En plus des conditions typiques pour faire fonctionner un Indexeur, vous aurez besoin d'un Endpoint `tap-escrow-subgraph` pour interroger les mises à jour de TAP. Vous pouvez utiliser The Graph Network pour interroger ou vous héberger vous-même sur votre `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Subgraph Graph TAP Arbitrum Sepolia (pour le testnet The Graph )](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Subgraph Graph TAP Arbitrum One (Pour le mainnet The Graph )](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. 
As a result, you have to index it manually. +> Note : `indexer-agent` ne gère pas actuellement l'indexation de ce subgraph comme il le fait pour le déploiement du subgraph réseau. Par conséquent, vous devez l'indexer manuellement. -## Migration Guide +## Guide De Migration -### Software versions +### Versions du logiciel -The required software version can be found [here](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases). +La version requise du logiciel peut être trouvée [ici](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases). -### Steps +### Étapes -1. **Indexer Agent** +1. **Agent d'indexeur** - - Follow the [same process](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components). - - Give the new argument `--tap-subgraph-endpoint` to activate the new TAP codepaths and enable redeeming of TAP RAVs. + - Suivez le [même processus](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components). + - Donnez le nouvel argument `--tap-subgraph-endpoint` pour activer les nouveaux chemins de code TAP et permettre l'échange de RAVs TAP. 2. **Indexer Service** - - Fully replace your current configuration with the [new Indexer Service rs](https://github.com/graphprotocol/indexer-rs). It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs). - - Like the older version, you can scale Indexer Service horizontally easily. It is still stateless. + - Remplacez complètement votre configuration actuelle par le [nouveau service d'Indexeur rs](https://github.com/graphprotocol/indexer-rs). Il est recommandé d'utiliser l'[image du conteneur](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs). + - Comme dans la version précédente, vous pouvez facilement faire monter Indexer Service en charge (scaling horizontal) car il reste sans état (stateless). 3. **TAP Agent** - - Run _one_ single instance of [TAP Agent](https://github.com/graphprotocol/indexer-rs) at all times. It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs). + - Exécutez _une_ seule instance de [Agent TAP](https://github.com/graphprotocol/indexer-rs) à tout moment. Il est recommandé d'utiliser l'[image de conteneur](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs). -4. **Configure Indexer Service and TAP Agent** +4. **Configuration de l’Indexer Service et du TAP Agent** - Configuration is a TOML file shared between `indexer-service` and `tap-agent`, supplied with the argument `--config /path/to/config.toml`. + La configuration se fait via un fichier TOML partagé entre `indexer-service` et `tap-agent`, passé en paramètre par `--config /path/to/config.toml`. 
- Check out the full [configuration](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) and the [default values](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml) + Consultez la [configuration complète](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) et les [valeurs par défaut](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml) -For minimal configuration, use the following template: +Pour une configuration minimale, utilisez le modèle suivant : ```bash -# You will have to change *all* the values below to match your setup. +# Vous devrez modifier *toutes* les valeurs ci-dessous pour qu'elles correspondent à votre configuration. # -# Some of the config below are global graph network values, which you can find here: +# Certaines des configurations ci-dessous sont des valeurs globales de graph network, que vous pouvez trouver ici : # # -# Pro tip: if you need to load some values from the environment into this config, you -# can overwrite with environment variables. For example, the following can be replaced -# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: +# Astuce de pro : si vous devez charger certaines valeurs de l'environnement dans cette configuration, vous +# pouvez les écraser avec des variables d'environnement. Par exemple, ce qui suit peut être remplacé +# par [PREFIX]_DATABASE_POSTGRESURL, où PREFIX peut être `INDEXER_SERVICE` ou `TAP_AGENT` : # # [database] # postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" [indexer] -indexer_address = "0x1111111111111111111111111111111111111111" +indexer_address = "0x111111111111111111111111111111111111111111" operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" [database] -# The URL of the Postgres database used for the indexer components. The same database -# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create -# the necessary tables. +# L'URL de la base de données Postgres utilisée pour les composants de l'Indexeur. La même base de données +# qui est utilisée par `indexer-agent`. Il est prévu que `indexer-agent` crée +# les tables nécessaires. postgres_url = "postgres://postgres@postgres:5432/postgres" [graph_node] -# URL to your graph-node's query endpoint +# URL vers l'endpoint de requête de votre graph-node query_url = "" -# URL to your graph-node's status endpoint +# URL vers l'endpoint d'état de votre graph-node status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# URL de requête pour le subgraph Graph Network. query_url = "" -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -# NOTE: Use `query_url` or `deployment_id` only -deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +# Facultatif, déploiement à rechercher dans le `graph-node` local, s'il est indexé localement. +# L'indexation locale du subgraph est recommandée. +# REMARQUE : utilisez uniquement `query_url` ou `deployment_id` +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] -# Query URL for the Escrow subgraph. +# URL de requête pour le subgraph Escrow. query_url = "" -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. 
-# NOTE: Use `query_url` or `deployment_id` only -deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +# Facultatif, déploiement à rechercher dans le `graph-node` local, s'il est indexé localement. +# Il est recommandé d'indexer localement le subgraph. +# REMARQUE : utilisez uniquement `query_url` ou `deployment_id` +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [blockchain] -# The chain ID of the network that the graph network is running on +# Le chain ID du réseau sur lequel The Graph Network s'exécute chain_id = 1337 -# Contract address of TAP's receipt aggregate voucher (RAV) verifier. -receipts_verifier_address = "0x2222222222222222222222222222222222222222" +# Adresse du contrat du vérificateur de bon de réception agrégé (RAV) de TAP. +receives_verifier_address = "0x222222222222222222222222222222222222222222222" -######################################## -# Specific configurations to tap-agent # -######################################## +############################################ +# Configurations spécifiques à tap-agent # +########################################## [tap] -# This is the amount of fees you are willing to risk at any given time. For ex. -# if the sender stops supplying RAVs for long enough and the fees exceed this -# amount, the indexer-service will stop accepting queries from the sender -# until the fees are aggregated. -# NOTE: Use strings for decimal values to prevent rounding errors -# e.g: +# Il s'agit du montant des frais que vous êtes prêt à risquer à un moment donné. Par exemple, +# si l'expéditeur cesse de fournir des RAV pendant suffisamment longtemps et que les frais dépassent ce +# montant, le service d'indexation cessera d'accepter les requêtes de l'expéditeur +# jusqu'à ce que les frais soient agrégés. +# REMARQUE : utilisez des chaînes de caractère pour les valeurs décimales afin d'éviter les erreurs d'arrondi +# p. ex. : # max_amount_willing_to_lose_grt = "0.1" max_amount_willing_to_lose_grt = 20 [tap.sender_aggregator_endpoints] -# Key-Value of all senders and their aggregator endpoints -# This one below is for the E&N testnet gateway for example. +# Clé-valeur de tous les expéditeurs et de leurs endpoint d'agrégation +# Celle-ci ci-dessous concerne par exemple la passerelle de testnet E&N. 0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" ``` Notez : -- Values for `tap.sender_aggregator_endpoints` can be found in the [gateway section](/indexing/tap/#gateway). -- Values for `blockchain.receipts_verifier_address` must be used accordingly to the [Blockchain addresses section](/indexing/tap/#contracts) using the appropriate chain id. +- Les valeurs de `tap.sender_aggregator_endpoints` peuvent être trouvées dans la section [passerelle](/indexing/tap/#gateway). +- Les valeurs de `blockchain.receipts_verifier_address` doivent être utilisées conformément à la [Section des adresses Blockchain](/indexing/tap/#contracts) en utilisant le chain id approprié. -**Log Level** +**Niveau de journalisation** -- You can set the log level by using the `RUST_LOG` environment variable. -- It’s recommended that you set it to `RUST_LOG=indexer_tap_agent=debug,info`. +- Vous pouvez définir le niveau de journalisation en utilisant la variable d'environnement `RUST_LOG`. +- Il est recommandé de le mettre à `RUST_LOG=indexer_tap_agent=debug,info`. ## Monitoring -### Metrics +### Métriques -All components expose the port 7300 to be queried by prometheus. 
+Tous les composants exposent le port 7300 qui peut être interrogé par prometheus. -### Grafana Dashboard +### Tableau de bord Grafana -You can download [Grafana Dashboard](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) and import. +Vous pouvez télécharger le [Tableau de bord Grafana](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) et l'importer. ### Launchpad -Currently, there is a WIP version of `indexer-rs` and `tap-agent` that can be found [here](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer) +Actuellement, il existe une version WIP de `indexer-rs` et de `tap-agent` qui peut être trouvée [ici](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer) From 69a0d81a7f4ea9e19b7f6deaace78e79e7693a4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:44:59 -0500 Subject: [PATCH 0092/1534] New translations tap.mdx (Spanish) --- website/src/pages/es/indexing/tap.mdx | 128 +++++++++++++++++++++++++- 1 file changed, 123 insertions(+), 5 deletions(-) diff --git a/website/src/pages/es/indexing/tap.mdx b/website/src/pages/es/indexing/tap.mdx index 5292ca8f6bf6..9d5f769f14f3 100644 --- a/website/src/pages/es/indexing/tap.mdx +++ b/website/src/pages/es/indexing/tap.mdx @@ -1,5 +1,123 @@ --- -title: TAP Migration Guide +title: |+ + Guía de Migración TAP + Aprende sobre el nuevo sistema de pagos de The Graph, el Protocolo de Agregación de Línea de Tiempo (TAP). Este sistema ofrece microtransacciones rápidas y eficientes con una confianza minimizada. + + Descripción General + TAP es un reemplazo directo del sistema de pagos Scalar actualmente en uso. Ofrece las siguientes características clave: + + Manejo eficiente de micropagos. + Agrega una capa de consolidación a las transacciones y costos en la cadena. + Permite a los Indexadores controlar los recibos y pagos, garantizando el pago por consultas. + Facilita puertas de enlace descentralizadas y sin confianza, mejorando el indexer-service para múltiples remitentes. + + Especificaciones + TAP permite que un remitente realice múltiples pagos a un receptor a través de TAP Receipts, los cuales agrupan estos pagos en un único pago denominado Receipt Aggregate Voucher (RAV). Este pago consolidado puede verificarse en la blockchain, reduciendo la cantidad de transacciones y simplificando el proceso de pago. + + Para cada consulta, la puerta de enlace te enviará un recibo firmado (signed receipt) que se almacenará en tu base de datos. Luego, estas consultas serán agrupadas por un tap-agent mediante una solicitud. Posteriormente, recibirás un RAV. Puedes actualizar un RAV enviándolo con recibos más recientes, lo que generará un nuevo RAV con un valor incrementado. + + Detalles del RAV + Es dinero que está pendiente de ser enviado a la blockchain. + Continuará enviando solicitudes para agrupar recibos y garantizar que el valor total de los recibos no agregados no supere la cantidad dispuesta a perder. + Cada RAV puede ser canjeado una sola vez en los contratos, por lo que se envían después de que la asignación se haya cerrado. + + Canjeo de RAV + Mientras ejecutes tap-agent e indexer-agent, todo el proceso se ejecutará automáticamente. A continuación, se presenta un desglose detallado del proceso: + + Proceso de Canjeo de RAV + 1. Un Indexador cierra la asignación. + 2. 
Durante el período , tap-agent toma todos los recibos pendientes de esa asignación específica y solicita su agregación en un RAV, marcándolo como el último. + 3. Indexer-agent toma todos los últimos RAVs y envía solicitudes de canje a la blockchain, lo que actualizará el valor de redeem_at. + 4. Durante el período , indexer-agent monitorea si la blockchain experimenta alguna reorganización que revierta la transacción. + Si la transacción es revertida, el RAV se reenvía a la blockchain. Si no es revertida, se marca como final. + + Blockchain Addresses + Contracts + Contract Arbitrum Mainnet (42161) Arbitrum Sepolia (421614) + TAP Verifier 0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a 0xfC24cE7a4428A6B89B52645243662A02BA734ECF + AllocationIDTracker 0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c 0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11 + Escrow 0x8f477709eF277d4A880801D01A140a9CF88bA0d3 0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02 + Gateway + Component Edge and Node Mainnet (Arbitrum Mainnet) Edge and Node Testnet (Arbitrum Sepolia) + Sender 0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 0xC3dDf37906724732FfD748057FEBe23379b0710D + Signers 0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211 0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE + Aggregator https://tap-aggregator.network.thegraph.com https://tap-aggregator.testnet.thegraph.com + + Requisitos + Además de los requisitos habituales para ejecutar un indexador, necesitarás un endpoint tap-escrow-subgraph para consultar actualizaciones de TAP. Puedes utilizar The Graph Network para hacer consultas o alojarlo en tu propio graph-node. + + Subgrafo Graph TAP Arbitrum Sepolia (para la testnet de The Graph). + Subgrafo Graph TAP Arbitrum One (para la mainnet de The Graph). + + Nota: Actualmente, indexer-agent no gestiona la indexación de este subgrafo como lo hace con la implementación del subgrafo de la red. Por lo tanto, debes indexarlo manualmente. + + Guía de Migración + Versiones de Software + La versión requerida del software se puede encontrar aquí. + + Pasos + 1. Indexer Agent + Sigue el mismo proceso de configuración. + Agrega el nuevo argumento --tap-subgraph-endpoint para activar las rutas de código de TAP y habilitar el canje de RAVs de TAP. + 2. Indexer Service + Reemplaza completamente tu configuración actual con la nueva versión de Indexer Service rs. Se recomienda usar la imagen del contenedor. + Como en la versión anterior, puedes escalar Indexer Service horizontalmente con facilidad. Sigue siendo stateless. + 3. TAP Agent + Ejecuta una única instancia de TAP Agent en todo momento. Se recomienda usar la imagen del contenedor. + 4. Configura Indexer Service y TAP Agent mediante un archivo TOML compartido, suministrado con el argumento --config /path/to/config.toml. + Consulta la configuración completa y los valores predeterminados. 
+ Para una configuración mínima, usa la siguiente plantilla: + + toml + Copy + Edit + [indexer] + indexer_address = "0x1111111111111111111111111111111111111111" + operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" + + [database] + postgres_url = "postgres://postgres@postgres:5432/postgres" + + [graph_node] + query_url = "http://graph-node:8000" + status_url = "http://graph-node:8000/graphql" + + [subgraphs.network] + query_url = "http://example.com/network-subgraph" + deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + [subgraphs.escrow] + query_url = "http://example.com/network-subgraph" + deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + [blockchain] + chain_id = 1337 + receipts_verifier_address = "0x2222222222222222222222222222222222222222" + + [tap] + max_amount_willing_to_lose_grt = 20 + + [tap.sender_aggregator_endpoints] + 0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" + Notas Importantes + Los valores de tap.sender_aggregator_endpoints se encuentran en la sección de gateway. + El valor de blockchain.receipts_verifier_address debe coincidir con la sección de direcciones de Blockchain según el chain ID apropiado. + Nivel de Registro (Log Level) + Puedes establecer el nivel de registro con la variable de entorno RUST_LOG. Se recomienda: + + bash + Copy + Edit + RUST_LOG=indexer_tap_agent=debug,info + Monitoreo + Métricas + Todos los componentes exponen el puerto 7300, que puede ser consultado por Prometheus. + + Grafana Dashboard + Puedes descargar el Dashboard de Grafana e importarlo. + + Launchpad + Actualmente, hay una versión en desarrollo de indexer-rs y tap-agent, que puedes encontrar aquí. --- Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust. @@ -9,7 +127,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +171,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +181,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. 
-- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. ## Migration Guide From 40851213411cd9a8e95837cc9b13bb79bfbbc09a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:00 -0500 Subject: [PATCH 0093/1534] New translations tap.mdx (Arabic) --- website/src/pages/ar/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ar/indexing/tap.mdx b/website/src/pages/ar/indexing/tap.mdx index cc523ded6384..ee96a02cd5b8 100644 --- a/website/src/pages/ar/indexing/tap.mdx +++ b/website/src/pages/ar/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. 
+> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. ## Migration Guide From 397ad49310c3d4c2b1e8946768113e0093a76c88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:01 -0500 Subject: [PATCH 0094/1534] New translations tap.mdx (Czech) --- website/src/pages/cs/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/cs/indexing/tap.mdx b/website/src/pages/cs/indexing/tap.mdx index bcf852442f4d..f8d028634016 100644 --- a/website/src/pages/cs/indexing/tap.mdx +++ b/website/src/pages/cs/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
## Migration Guide From 3987ef08c6ccd10513fbc0218ca7e5dad32186d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:02 -0500 Subject: [PATCH 0095/1534] New translations tap.mdx (German) --- website/src/pages/de/indexing/tap.mdx | 214 +++++++++++++------------- 1 file changed, 107 insertions(+), 107 deletions(-) diff --git a/website/src/pages/de/indexing/tap.mdx b/website/src/pages/de/indexing/tap.mdx index 27f3b85a8c58..126228396278 100644 --- a/website/src/pages/de/indexing/tap.mdx +++ b/website/src/pages/de/indexing/tap.mdx @@ -1,193 +1,193 @@ --- -title: TAP Migration Guide +title: TAP-Migrationsleitfaden --- -Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust. +Erfahren Sie mehr über das neue Zahlungssystem von The Graph, **Timeline Aggregation Protocol, TAP**. Dieses System bietet schnelle, effiziente Mikrotransaktionen mit minimiertem Vertrauen. -## Overview +## Überblick -[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: +[TAP] (https://docs.rs/tap_core/latest/tap_core/index.html) ist ein direkter Ersatz für das derzeitige Scalar-Zahlungssystem. Es bietet die folgenden Hauptfunktionen: -- Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. -- Allows Indexers control of receipts and payments, guaranteeing payment for queries. -- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. +- Effiziente Abwicklung von Mikrozahlungen. +- Fügt den Onchain-Transaktionen und -Kosten eine weitere Ebene der Konsolidierung hinzu. +- Ermöglicht den Indexern die Kontrolle über Eingänge und Zahlungen und garantiert die Bezahlung von Abfragen. +- Es ermöglicht dezentralisierte, vertrauenslose Gateways und verbessert die Leistung des `indexer-service` für mehrere Absender. ## Besonderheiten -TAP allows a sender to make multiple payments to a receiver, **TAP Receipts**, which aggregates these payments into a single payment, a **Receipt Aggregate Voucher**, also known as a **RAV**. This aggregated payment can then be verified on the blockchain, reducing the number of transactions and simplifying the payment process. +TAP ermöglicht es einem Sender, mehrere Zahlungen an einen Empfänger zu leisten, **TAP Receipts**, der diese Zahlungen zu einer einzigen Zahlung zusammenfasst, einem **Receipt Aggregate Voucher**, auch bekannt als **RAV**. Diese aggregierte Zahlung kann dann auf der Blockchain verifiziert werden, wodurch sich die Anzahl der Transaktionen verringert und der Zahlungsvorgang vereinfacht wird. -For each query, the gateway will send you a `signed receipt` that is stored on your database. Then, these queries will be aggregated by a `tap-agent` through a request. Afterwards, you’ll receive a RAV. You can update a RAV by sending it with newer receipts and this will generate a new RAV with an increased value. +Für jede Abfrage sendet Ihnen das Gateway eine „signierte Quittung“, die in Ihrer Datenbank gespeichert wird. Dann werden diese Abfragen von einem „Tap-Agent“ durch eine Anfrage aggregiert. Anschließend erhalten Sie ein RAV. Sie können ein RAV aktualisieren, indem Sie es mit neueren Quittungen senden, wodurch ein neues RAV mit einem höheren Wert erzeugt wird. 
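To make the aggregation described above concrete, here is a toy walk-through in shell arithmetic. Every value is invented; real receipts are signed objects and the actual folding is done by `tap-agent` through the aggregator. The one property worth noticing is that each updated RAV folds in newer receipts, so its value never decreases.

```bash
# Toy model of receipts being folded into a RAV (values are made-up wei of GRT).
rav=0
for r in 100 250 400; do rav=$((rav + r)); done   # first aggregation request
echo "first RAV value: $rav"                      # 750

for r in 50 200; do rav=$((rav + r)); done        # newer receipts arrive later
echo "updated RAV value: $rav"                    # 1000, strictly larger
```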
-### RAV Details +### RAV-Details -- It’s money that is waiting to be sent to the blockchain. +- Es ist Geld, das darauf wartet, an die Blockchain gesendet zu werden. -- It will continue to send requests to aggregate and ensure that the total value of non-aggregated receipts does not exceed the `amount willing to lose`. +- Es werden weiterhin Anträge auf Zusammenlegung stellen und sicherstellen, dass der Gesamtwert der nicht zusammengefassten Einnahmen `amount willing to lose` (den „Betrag, den man zu verlieren bereit ist“), nicht übersteigt. -- Each RAV can be redeemed once in the contracts, which is why they are sent after the allocation is closed. +- Jedes RAV kann einmal in den Verträgen eingelöst werden, weshalb sie nach Abschluss der Zuteilung versandt werden. -### Redeeming RAV +### Einlösen des RAV -As long as you run `tap-agent` and `indexer-agent`, everything will be executed automatically. The following provides a detailed breakdown of the process: +Solange Sie `tap-agent` und `indexer-agent` ausführen, wird alles automatisch ausgeführt. Im Folgenden finden Sie eine detaillierte Aufschlüsselung des Prozesses: -1. An Indexer closes allocation. +1. Ein Indexer schließt die Zuteilung ab. -2. ` period, tap-agent` takes all pending receipts for that specific allocation and requests an aggregation into a RAV, marking it as `last`. +2. ` period, tap-agent` nimmt alle ausstehenden Quittungen für diese spezifische Zuteilung und fordert eine Aggregation in einem RAV an, wobei es als `last` gekennzeichnet wird. -3. `indexer-agent` takes all the last RAVS and sends redeem requests to the blockchain, which will update the value of `redeem_at`. +3. `indexer-agent` nimmt alle letzten RAVS und sendet Einlöseanforderungen an die Blockchain, die den Wert von ‚redeem_at‘ aktualisiert. -4. During the `` period, `indexer-agent` monitors if the blockchain has any reorganizations that revert the transaction. +4. Während der ``-Periode überwacht der `indexer-agent`, ob es in der Blockchain irgendwelche Reorganisationen gibt, die die Transaktion rückgängig machen. - - If it was reverted, the RAV is resent to the blockchain. If it was not reverted, it gets marked as `final`. + - Wurde es rückgängig gemacht, wird das RAV erneut an die Blockchain gesendet. Wenn es nicht rückgängig gemacht wurde, wird es als `final` markiert. 
-## Blockchain Addresses +## Blockchain-Adressen -### Contracts +### Verträge -| Contract | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | -| ------------------- | -------------------------------------------- | -------------------------------------------- | -| TAP Verifier | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | -| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | -| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | +| Vertrag | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | +| -------------------------- | -------------------------------------------- | -------------------------------------------- | +| TAP-Prüfer | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Treuhandkonto | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | -| ---------- | --------------------------------------------- | --------------------------------------------- | -| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| Komponente | Edge- und Node-Mainnet (Arbitrum-Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| ---------------- | ---------------------------------------------- | --------------------------------------------- | +| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Unterzeichner | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | -### Requirements +### Anforderungen -In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. +Zusätzlich zu den typischen Anforderungen für den Betrieb eines Indexers benötigen Sie einen `tap-escrow-subgraph`-Endpunkt, um TAP-Aktualisierungen abzufragen. Sie können The Graph Network zur Abfrage verwenden oder sich selbst auf Ihrem `graph-node` hosten. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) -- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) +- [Graph TAP Arbitrum Sepolia subgraph (für The Graph Testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One subgraph (für The Graph Mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. 
+> Hinweis: `indexer-agent` übernimmt derzeit nicht die Indizierung dieses Subgraphen, wie es bei der Bereitstellung des Netzwerk-Subgraphen der Fall ist. Daher müssen Sie ihn manuell indizieren.

-## Migration Guide
+## Migrationsleitfaden

-### Software versions
+### Software-Versionen

-The required software version can be found [here](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases).
+Die erforderliche Softwareversion finden Sie [hier](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases).

-### Steps
+### Schritte

-1. **Indexer Agent**
+1. **Indexer-Agent**

-   - Follow the [same process](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components).
-   - Give the new argument `--tap-subgraph-endpoint` to activate the new TAP codepaths and enable redeeming of TAP RAVs.
+   - Folgen Sie dem [gleichen Prozess](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components).
+   - Geben Sie das neue Argument `--tap-subgraph-endpoint` an, um die neuen TAP-Codepfade zu aktivieren und die Einlösung von TAP-RAVs zu ermöglichen.

-2. **Indexer Service**
+2. **Indexer-Service**

-   - Fully replace your current configuration with the [new Indexer Service rs](https://github.com/graphprotocol/indexer-rs). It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).
-   - Like the older version, you can scale Indexer Service horizontally easily. It is still stateless.
+   - Ersetzen Sie Ihre aktuelle Konfiguration vollständig durch den [neuen Indexer-Service rs](https://github.com/graphprotocol/indexer-rs). Es wird empfohlen, dass Sie das [Container-Image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs) verwenden.
+   - Wie bei der älteren Version können Sie den Indexer-Service problemlos horizontal skalieren. Er ist immer noch zustandslos.

3. **TAP Agent**

-   - Run _one_ single instance of [TAP Agent](https://github.com/graphprotocol/indexer-rs) at all times. It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).
+   - Führen Sie immer _eine_ einzelne Instanz von [TAP Agent](https://github.com/graphprotocol/indexer-rs) aus. Es wird empfohlen, dass Sie das [Container-Image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs) verwenden.

-4. **Configure Indexer Service and TAP Agent**
+4. **Indexer-Service und TAP-Agent konfigurieren**

-   Configuration is a TOML file shared between `indexer-service` and `tap-agent`, supplied with the argument `--config /path/to/config.toml`.
+   Die Konfiguration ist eine TOML-Datei, die von `indexer-service` und `tap-agent` gemeinsam genutzt wird und mit dem Argument `--config /path/to/config.toml` übergeben wird.

-   Check out the full [configuration](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) and the [default values](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml)
+   Sehen Sie sich die vollständige [Konfiguration](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) und die [Standardwerte](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml) an.
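The comments in the template that follows describe an environment-override convention: a TOML value such as `database.postgres_url` can be supplied as `[PREFIX]_DATABASE_POSTGRESURL`. Below is a minimal Rust sketch of that naming rule, derived only from the single example given in the template — the binaries' actual mapping logic may differ:

```rust
// Sketch of the env-override naming convention described in the config
// template: TOML path -> [PREFIX]_UPPERCASEDKEY, e.g. database.postgres_url
// with prefix TAP_AGENT becomes TAP_AGENT_DATABASE_POSTGRESURL.
use std::env;

fn env_key(prefix: &str, toml_path: &str) -> String {
    let suffix: String = toml_path
        .chars()
        .filter(|c| *c != '_') // postgres_url -> postgresurl
        .map(|c| if c == '.' { '_' } else { c.to_ascii_uppercase() })
        .collect();
    format!("{prefix}_{suffix}")
}

/// The environment value wins over the value parsed from config.toml, if set.
fn lookup(prefix: &str, toml_path: &str, file_value: &str) -> String {
    env::var(env_key(prefix, toml_path)).unwrap_or_else(|_| file_value.to_string())
}

fn main() {
    let url = lookup(
        "TAP_AGENT",
        "database.postgres_url",
        "postgres://postgres@postgres:5432/postgres",
    );
    println!("{url}"); // env override applies if TAP_AGENT_DATABASE_POSTGRESURL is set
}
```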
-For minimal configuration, use the following template:
+Für eine minimale Konfiguration verwenden Sie die folgende Vorlage:

```bash
-# You will have to change *all* the values below to match your setup.
+# Sie müssen *alle* nachstehenden Werte ändern, um sie an Ihre Einrichtung anzupassen.
#
-# Some of the config below are global graph network values, which you can find here:
+# Einige der nachstehenden Konfigurationswerte sind globale Werte des Graph-Netzwerks, die Sie hier finden können:
#
#
-# Pro tip: if you need to load some values from the environment into this config, you
-# can overwrite with environment variables. For example, the following can be replaced
-# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`:
+# Pro-Tipp: Wenn Sie einige Werte aus der Umgebung in diese Konfiguration laden müssen,
+# können Sie sie mit Umgebungsvariablen überschreiben. Zum Beispiel kann das Folgende durch
+# [PREFIX]_DATABASE_POSTGRESURL ersetzt werden, wobei PREFIX `INDEXER_SERVICE` oder `TAP_AGENT` sein kann:
#
# [database]
# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0"

[indexer]
indexer_address = "0x1111111111111111111111111111111111111111"
operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane"

[database]
-# The URL of the Postgres database used for the indexer components. The same database
-# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create
-# the necessary tables.
+# Die URL der Postgres-Datenbank, die für die Indexer-Komponenten verwendet wird. Die gleiche Datenbank,
+# die auch vom `indexer-agent` verwendet wird. Es wird erwartet, dass `indexer-agent`
+# die notwendigen Tabellen erstellt.
postgres_url = "postgres://postgres@postgres:5432/postgres"

[graph_node]
-# URL to your graph-node's query endpoint
+# URL zum Abfrageendpunkt Ihres Graph-Knotens
query_url = ""
-# URL to your graph-node's status endpoint
+# URL zum Status-Endpunkt Ihres Graph-Knotens
status_url = ""

[subgraphs.network]
-# Query URL for the Graph Network subgraph.
+# Abfrage-URL für den Graph-Network-Subgraphen.
query_url = ""
-# Optional, deployment to look for in the local `graph-node`, if locally indexed.
-# Locally indexing the subgraph is recommended.
-# NOTE: Use `query_url` or `deployment_id` only
+# Optional, Deployment, nach dem im lokalen `graph-node` gesucht wird, falls lokal indiziert.
+# Es wird empfohlen, den Subgraphen lokal zu indizieren.
+# HINWEIS: Verwenden Sie nur `query_url` oder `deployment_id`.
deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

[subgraphs.escrow]
-# Query URL for the Escrow subgraph.
+# Abfrage-URL für den Escrow-Subgraphen.
query_url = ""
-# Optional, deployment to look for in the local `graph-node`, if locally indexed.
-# Locally indexing the subgraph is recommended.
-# NOTE: Use `query_url` or `deployment_id` only
+# Optional, Deployment, nach dem im lokalen `graph-node` gesucht wird, falls lokal indiziert.
+# Es wird empfohlen, den Subgraphen lokal zu indizieren.
+# HINWEIS: Verwenden Sie nur `query_url` oder `deployment_id`.
deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

[blockchain]
-# The chain ID of the network that the graph network is running on
+# Die Chain-ID des Netzwerks, auf dem das Graph-Netzwerk läuft
chain_id = 1337
-# Contract address of TAP's receipt aggregate voucher (RAV) verifier.
+# Vertragsadresse des RAV-Prüfers (receipt aggregate voucher) von TAP.
receipts_verifier_address = "0x2222222222222222222222222222222222222222"

########################################
-# Specific configurations to tap-agent #
+# Spezifische Konfigurationen für tap-agent #
########################################
[tap]
-# This is the amount of fees you are willing to risk at any given time. For ex.
-# if the sender stops supplying RAVs for long enough and the fees exceed this
-# amount, the indexer-service will stop accepting queries from the sender
-# until the fees are aggregated.
-# NOTE: Use strings for decimal values to prevent rounding errors
-# e.g:
-# max_amount_willing_to_lose_grt = "0.1"
+# Dies ist die Höhe der Gebühren, die Sie bereit sind, zu einem bestimmten Zeitpunkt zu riskieren.
+# Wenn der Sender zum Beispiel lange genug keine RAVs mehr liefert und die Gebühren diesen
+# Betrag übersteigen, nimmt der Indexer-Service keine Anfragen mehr vom Sender an,
+# bis die Gebühren aggregiert sind.
+# HINWEIS: Verwenden Sie Strings für Dezimalwerte, um Rundungsfehler zu vermeiden,
+# z. B.:
+# max_amount_willing_to_lose_grt = "0.1"
max_amount_willing_to_lose_grt = 20

[tap.sender_aggregator_endpoints]
-# Key-Value of all senders and their aggregator endpoints
-# This one below is for the E&N testnet gateway for example.
+# Schlüssel-Wert-Paare aller Absender und ihrer Aggregator-Endpunkte
+# Das folgende Beispiel gilt für das E&N-Testnet-Gateway.
0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com"
```

-Notes:
+Anmerkungen:

-- Values for `tap.sender_aggregator_endpoints` can be found in the [gateway section](/indexing/tap/#gateway).
-- Values for `blockchain.receipts_verifier_address` must be used accordingly to the [Blockchain addresses section](/indexing/tap/#contracts) using the appropriate chain id.
+- Die Werte für `tap.sender_aggregator_endpoints` finden Sie im Abschnitt [Gateway](/indexing/tap/#gateway).
+- Die Werte für `blockchain.receipts_verifier_address` müssen entsprechend dem Abschnitt [Blockchain-Adressen](/indexing/tap/#contracts) unter Verwendung der passenden Chain-ID verwendet werden.

**Log Level**

-- You can set the log level by using the `RUST_LOG` environment variable.
-- It’s recommended that you set it to `RUST_LOG=indexer_tap_agent=debug,info`.
+- Sie können die Protokollstufe mit der Umgebungsvariablen `RUST_LOG` einstellen.
+- Es wird empfohlen, die Einstellung `RUST_LOG=indexer_tap_agent=debug,info` zu verwenden.

## Monitoring

-### Metrics
+### Metriken

-All components expose the port 7300 to be queried by prometheus.
+Alle Komponenten stellen den Port 7300 zur Abfrage durch Prometheus zur Verfügung.
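Before pointing Prometheus at the components, a quick probe can confirm that the port-7300 exporters respond. A std-only Rust sketch: the port comes from the docs above, while the `/metrics` path is an assumption (the conventional Prometheus exposition path):

```rust
// Fetch a component's metrics endpoint over plain HTTP, using only std.
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    let mut stream = TcpStream::connect("127.0.0.1:7300")?;
    stream.write_all(b"GET /metrics HTTP/1.1\r\nHost: 127.0.0.1\r\nConnection: close\r\n\r\n")?;
    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    // Print the first lines of the response (status, headers, then metrics).
    for line in response.lines().take(20) {
        println!("{line}");
    }
    Ok(())
}
```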
-### Grafana Dashboard
+### Grafana-Dashboard

-You can download [Grafana Dashboard](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) and import.
+Sie können das [Grafana-Dashboard](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) herunterladen und importieren.

### Launchpad

-Currently, there is a WIP version of `indexer-rs` and `tap-agent` that can be found [here](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer)
+Derzeit gibt es eine WIP-Version von `indexer-rs` und `tap-agent`, die [hier](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer) zu finden ist.

From 77b1cf6b43950b29b772ed2ffbd6bac2ed453847 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:03 -0500
Subject: [PATCH 0096/1534] New translations tap.mdx (Italian)

---
 website/src/pages/it/indexing/tap.mdx | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/website/src/pages/it/indexing/tap.mdx b/website/src/pages/it/indexing/tap.mdx
index a07a2f6c98a3..8604a92b41e7 100644
--- a/website/src/pages/it/indexing/tap.mdx
+++ b/website/src/pages/it/indexing/tap.mdx
@@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T
[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features:

- Efficiently handles micropayments.
-- Adds a layer of consolidations to on-chain transactions and costs.
+- Adds a layer of consolidations to onchain transactions and costs.
- Allows Indexers control of receipts and payments, guaranteeing payment for queries.
- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders.

@@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed

### Gateway

-| Component  | Edge and Node Mainnet (Aribtrum Mainnet)      | Edge and Node Testnet (Arbitrum Sepolia)      |
+| Component  | Edge and Node Mainnet (Arbitrum Mainnet)      | Edge and Node Testnet (Arbitrum Sepolia)      |
| ---------- | --------------------------------------------- | --------------------------------------------- |
| Sender     | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467`  | `0xC3dDf37906724732FfD748057FEBe23379b0710D`  |
| Signers    | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211`  | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE`  |
| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` |

@@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed

In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`.

-- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
+- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1)

-> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually.
+> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually.

## Migration Guide

From 533f5a04b27f080d85ff1e774cac28d0f60ec6ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:04 -0500
Subject: [PATCH 0097/1534] New translations tap.mdx (Japanese)

---
 website/src/pages/ja/indexing/tap.mdx | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/website/src/pages/ja/indexing/tap.mdx b/website/src/pages/ja/indexing/tap.mdx
index f35c37cf4a46..b1d43a4e628c 100644
--- a/website/src/pages/ja/indexing/tap.mdx
+++ b/website/src/pages/ja/indexing/tap.mdx
@@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T
[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features:

- Efficiently handles micropayments.
-- Adds a layer of consolidations to on-chain transactions and costs.
+- Adds a layer of consolidations to onchain transactions and costs.
- Allows Indexers control of receipts and payments, guaranteeing payment for queries.
- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders.

@@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed

### Gateway

-| Component  | Edge and Node Mainnet (Aribtrum Mainnet)      | Edge and Node Testnet (Arbitrum Sepolia)      |
+| Component  | Edge and Node Mainnet (Arbitrum Mainnet)      | Edge and Node Testnet (Arbitrum Sepolia)      |
| ---------- | --------------------------------------------- | --------------------------------------------- |
| Sender     | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467`  | `0xC3dDf37906724732FfD748057FEBe23379b0710D`  |
| Signers    | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211`  | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE`  |
| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` |

@@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed

In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`.

-- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
+- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1)

-> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually.
+> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually.
## Migration Guide From ff287f07ba1c21627c44ce3f67af90e5e573380e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:05 -0500 Subject: [PATCH 0098/1534] New translations tap.mdx (Korean) --- website/src/pages/ko/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ko/indexing/tap.mdx b/website/src/pages/ko/indexing/tap.mdx index 4132216ca5b6..3bab672ab211 100644 --- a/website/src/pages/ko/indexing/tap.mdx +++ b/website/src/pages/ko/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
## Migration Guide From e269089a8fcf2a656523d1f60fad312851f34e3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:07 -0500 Subject: [PATCH 0099/1534] New translations tap.mdx (Dutch) --- website/src/pages/nl/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/nl/indexing/tap.mdx b/website/src/pages/nl/indexing/tap.mdx index 4132216ca5b6..3bab672ab211 100644 --- a/website/src/pages/nl/indexing/tap.mdx +++ b/website/src/pages/nl/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
## Migration Guide From eb243c757d0e286f107b1daa5661b018a99d79ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:08 -0500 Subject: [PATCH 0100/1534] New translations tap.mdx (Polish) --- website/src/pages/pl/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/pl/indexing/tap.mdx b/website/src/pages/pl/indexing/tap.mdx index 4132216ca5b6..3bab672ab211 100644 --- a/website/src/pages/pl/indexing/tap.mdx +++ b/website/src/pages/pl/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
## Migration Guide From a7993199809ab982eb695a8354aed092b413f86b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:09 -0500 Subject: [PATCH 0101/1534] New translations tap.mdx (Portuguese) --- website/src/pages/pt/indexing/tap.mdx | 180 +++++++++++++------------- 1 file changed, 90 insertions(+), 90 deletions(-) diff --git a/website/src/pages/pt/indexing/tap.mdx b/website/src/pages/pt/indexing/tap.mdx index b69d63ceb91e..a8f6b2d7d713 100644 --- a/website/src/pages/pt/indexing/tap.mdx +++ b/website/src/pages/pt/indexing/tap.mdx @@ -1,112 +1,112 @@ --- -title: TAP Migration Guide +title: Como migrar para o TAP --- -Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust. +Conheça o novo sistema de pagamentos do The Graph: **TAP — Timeline Aggregation Protocol** ("Protocolo de Agregação de Histórico"): um sistema de microtransações rápidas e eficientes, livre de confiança. ## Visão geral -[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: +O [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) é um programa modular que substituirá o sistema de pagamento Scalar atualmente em uso. Os recursos do TAP incluem: -- Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. -- Allows Indexers control of receipts and payments, guaranteeing payment for queries. -- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. +- Processamento eficiente de micropagamentos. +- Uma camada de consolidações para transações e custos na chain. +- Controle total de recibos e pagamentos para Indexadores, garantindo pagamentos por queries. +- Pontes de ligação descentralizadas e livres de confiança, melhorando o desempenho do `indexer-service` para grupos de remetentes. ## Especificações -TAP allows a sender to make multiple payments to a receiver, **TAP Receipts**, which aggregates these payments into a single payment, a **Receipt Aggregate Voucher**, also known as a **RAV**. This aggregated payment can then be verified on the blockchain, reducing the number of transactions and simplifying the payment process. +O TAP permite que um remetente faça múltiplos pagamentos a um destinatário — os **TAP Receipts** ("Recibos do TAP") — que agrega os pagamentos em um, o **RAV — Receipt Aggregate Voucher** (Prova de Recibos Agregados). Este pagamento agregado pode ser verificado na blockchain, reduzindo o número de transações e simplificando o processo de pagamento. -For each query, the gateway will send you a `signed receipt` that is stored on your database. Then, these queries will be aggregated by a `tap-agent` through a request. Afterwards, you’ll receive a RAV. You can update a RAV by sending it with newer receipts and this will generate a new RAV with an increased value. +Para cada query, a ponte de ligação enviará um `signed receipt` ("recibo assinado") para armazenar na sua base de dados. Estes queries serão então agregados por um `tap-agent` através de uma solicitação. Depois, você receberá um RAV. Para atualizar um RAV, envie-o com novos recibos para gerar um novo RAV com valor maior. -### RAV Details +### Detalhes de um RAV -- It’s money that is waiting to be sent to the blockchain. +- É dinheiro esperando ser enviado à blockchain. 
-- It will continue to send requests to aggregate and ensure that the total value of non-aggregated receipts does not exceed the `amount willing to lose`.
+- Ele continuará a enviar solicitações de agregação e evitará que o valor total de recibos não agregados passe do `amount willing to lose` ("quantia viável de perder").

-- Each RAV can be redeemed once in the contracts, which is why they are sent after the allocation is closed.
+- Cada RAV pode ser resgatado uma vez nos contratos; é por isto que são enviados após o fechamento da alocação.

-### Redeeming RAV
+### Como resgatar um RAV

-As long as you run `tap-agent` and `indexer-agent`, everything will be executed automatically. The following provides a detailed breakdown of the process:
+Tudo será executado automaticamente enquanto `tap-agent` e `indexer-agent` forem executados. Veja um passo-a-passo do processo:

-1. An Indexer closes allocation.
+1. Um Indexador fecha uma alocação.

-2. ` period, tap-agent` takes all pending receipts for that specific allocation and requests an aggregation into a RAV, marking it as `last`.
+2. ` period, tap-agent` pega todos os recibos pendentes dessa alocação específica e solicita uma agregação para um RAV, marcando-o como `last` ("último").

-3. `indexer-agent` takes all the last RAVS and sends redeem requests to the blockchain, which will update the value of `redeem_at`.
+3. O `indexer-agent` pega todos os últimos RAVs e manda solicitações de resgate à blockchain, que atualizará o valor de `redeem_at`.

-4. During the `` period, `indexer-agent` monitors if the blockchain has any reorganizations that revert the transaction.
+4. Durante o período de ``, o `indexer-agent` monitora se a blockchain tem reorganizações que possam reverter a transação.

-   - If it was reverted, the RAV is resent to the blockchain. If it was not reverted, it gets marked as `final`.
+   - Se revertida, o RAV é reenviado à blockchain. Se não for revertida, ele é marcado como `final`.
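The redemption steps above form a small state machine around each RAV. The Rust sketch below is illustrative only — not the actual database schema or `indexer-agent` code; the state names simply mirror the `last` and `final` markers described above:

```rust
// Illustrative RAV lifecycle: aggregated as `last` when the allocation closes,
// redeemed onchain, then resent after a reorg or marked `final`.
#[derive(Debug)]
enum RavState {
    Last,                         // latest aggregate for a closed allocation
    Redeeming { redeem_at: u64 }, // redeem tx sent; time of submission recorded
    Final,                        // survived the reorg-monitoring window
}

fn step(state: RavState, reorg_reverted_tx: bool, now: u64) -> RavState {
    match state {
        RavState::Last => RavState::Redeeming { redeem_at: now },
        // If a reorg reverted the transaction, resend the RAV.
        RavState::Redeeming { .. } if reorg_reverted_tx => RavState::Redeeming { redeem_at: now },
        RavState::Redeeming { .. } => RavState::Final,
        RavState::Final => RavState::Final,
    }
}

fn main() {
    let mut rav = RavState::Last;
    for (now, reverted) in [(100, false), (160, true), (220, false)] {
        rav = step(rav, reverted, now);
        println!("t={now}: {rav:?}");
    }
}
```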
-## Blockchain Addresses
+## Endereços de Blockchain

-### Contracts
+### Contratos

-| Contract            | Arbitrum Mainnet (42161)                     | Arbitrum Sepolia (421614)                    |
-| ------------------- | -------------------------------------------- | -------------------------------------------- |
-| TAP Verifier        | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` |
-| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` |
-| Escrow              | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` |
+| Contrato            | Mainnet Arbitrum (42161)                     | Arbitrum Sepolia (421614)                    |
+| ------------------- | -------------------------------------------- | -------------------------------------------- |
+| Verificador do TAP  | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` |
+| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` |
+| Escrow              | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` |

-### Gateway
+### Porta de Ligação

-| Component  | Edge and Node Mainnet (Aribtrum Mainnet)      | Edge and Node Testnet (Arbitrum Sepolia)      |
-| ---------- | --------------------------------------------- | --------------------------------------------- |
-| Sender     | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467`  | `0xC3dDf37906724732FfD748057FEBe23379b0710D`  |
-| Signers    | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211`  | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE`  |
-| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` |
+| Componente  | Mainnet Edge and Node (Mainnet Arbitrum)      | Testnet do Edge and Node (Arbitrum Sepolia)   |
+| ----------- | --------------------------------------------- | --------------------------------------------- |
+| Remetente   | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467`  | `0xC3dDf37906724732FfD748057FEBe23379b0710D`  |
+| Signatários | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211`  | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE`  |
+| Agregador   | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` |

-### Requirements
+### Requisitos

-In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`.
+Além dos requisitos típicos para executar um indexador, é necessário um endpoint `tap-escrow-subgraph` para fazer queries de atualizações do TAP. É possível usar o The Graph Network para fazer queries ou hospedá-lo no seu `graph-node`.

-- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
-- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1)
+- [Subgraph do TAP do The Graph — Arbitrum Sepolia (para a testnet do The Graph)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
+- [Subgraph do TAP do The Graph — Arbitrum One (para a mainnet do The Graph)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1)

-> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually.
+> Nota: o `indexer-agent` atualmente não executa o indexamento deste subgraph como faz com o lançamento do subgraph da rede. Portanto, ele deve ser indexado manualmente.

-## Migration Guide
+## Guia de migração

-### Software versions
+### Versões de software

-The required software version can be found [here](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases).
+A versão de software necessária pode ser encontrada [aqui](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases).

-### Steps
+### Passo a passo

-1. **Indexer Agent**
+1. **Agente Indexador**

-   - Follow the [same process](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components).
-   - Give the new argument `--tap-subgraph-endpoint` to activate the new TAP codepaths and enable redeeming of TAP RAVs.
+   - Siga o [mesmo processo](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components).
+   - Insira o novo argumento `--tap-subgraph-endpoint` para ativar os novos caminhos de código do TAP e permitir o resgate de RAVs do TAP.

-2. **Indexer Service**
+2. **Serviço Indexador**

-   - Fully replace your current configuration with the [new Indexer Service rs](https://github.com/graphprotocol/indexer-rs). It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).
-   - Like the older version, you can scale Indexer Service horizontally easily. It is still stateless.
+   - Substitua a sua configuração atual por completo pelo [novo Indexer Service rs](https://github.com/graphprotocol/indexer-rs). Vale a pena usar a [imagem de contêiner](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).
+   - Assim como na versão mais antiga, o Serviço Indexador pode ser escalado horizontalmente com facilidade. Ele continua sem estado (stateless).

3. **TAP Agent**

-   - Run _one_ single instance of [TAP Agent](https://github.com/graphprotocol/indexer-rs) at all times. It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).
+   - Execute _uma_ única instância do [Agente do TAP](https://github.com/graphprotocol/indexer-rs) em todos os momentos. Recomendamos usar a [imagem de contêiner](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).

-4. **Configure Indexer Service and TAP Agent**
+4. **Configuração do Serviço Indexador e Agente do TAP**

-   Configuration is a TOML file shared between `indexer-service` and `tap-agent`, supplied with the argument `--config /path/to/config.toml`.
+   A configuração é um arquivo TOML compartilhado entre o `indexer-service` e o `tap-agent`, fornecido com o argumento `--config /path/to/config.toml`.

-   Check out the full [configuration](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) and the [default values](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml)
+   Veja a íntegra da [configuração](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) e os [valores padrão](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml)

-For minimal configuration, use the following template:
+Para o mínimo de configuração, veja o exemplo abaixo:

```bash
-# You will have to change *all* the values below to match your setup.
+# Você deve mudar *todos* os valores abaixo para que correspondam à sua configuração.
# -# Some of the config below are global graph network values, which you can find here: +# O abaixo inclui valores globais da Graph Network, como visto aqui: # # -# Pro tip: if you need to load some values from the environment into this config, you -# can overwrite with environment variables. For example, the following can be replaced -# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: +# Fica a dica: se precisar carregar alguns variáveis do ambiente nesta configuração, você +# pode substituí-los com variáveis do ambiente. Por exemplo: pode-se substituir +# o abaixo por [PREFIX]_DATABASE_POSTGRESURL, onde PREFIX pode ser `INDEXER_SERVICE` ou `TAP_AGENT`: # # [database] # postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" @@ -116,9 +116,9 @@ indexer_address = "0x1111111111111111111111111111111111111111" operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" [database] -# The URL of the Postgres database used for the indexer components. The same database -# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create -# the necessary tables. +# A URL da base de dados Postgres usada para os componentes do indexador. +# A mesma base de dados usada pelo `indexer-agent`. Espera-se que o `indexer-agent` +# criará as tabelas necessárias. postgres_url = "postgres://postgres@postgres:5432/postgres" [graph_node] @@ -128,11 +128,11 @@ query_url = "" status_url = "" [subgraphs.network] -# Query URL for the Graph Network subgraph. +# URL de query pro subgraph do Graph Network. query_url = "" -# Optional, deployment to look for in the local `graph-node`, if locally indexed. -# Locally indexing the subgraph is recommended. -# NOTE: Use `query_url` or `deployment_id` only +# Opcional, procure o lançamento no `graph-node` local, se localmente indexado. +# Vale a pena indexar o subgraph localmente. +# NOTA: Usar apenas `query_url` ou `deployment_id` deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [subgraphs.escrow] @@ -144,50 +144,50 @@ query_url = "" deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [blockchain] -# The chain ID of the network that the graph network is running on +# ID de chain da rede que está a executar o Graph Network chain_id = 1337 -# Contract address of TAP's receipt aggregate voucher (RAV) verifier. +# Endereço de contrato do verificador de prova de agregação de recibos do TAP. receipts_verifier_address = "0x2222222222222222222222222222222222222222" ######################################## -# Specific configurations to tap-agent # +# Configurações específicas para o tap-agent # ######################################## [tap] -# This is the amount of fees you are willing to risk at any given time. For ex. -# if the sender stops supplying RAVs for long enough and the fees exceed this -# amount, the indexer-service will stop accepting queries from the sender -# until the fees are aggregated. -# NOTE: Use strings for decimal values to prevent rounding errors -# e.g: -# max_amount_willing_to_lose_grt = "0.1" +# Esta é a quantia de taxas que você está disposto a arriscar. Por exemplo: +# se o remetente parar de enviar RAVs por tempo suficiente e as taxas passarem +# desta quantia, o indexer-service não aceitará mais queries deste remetente +# até que as taxas sejam agregadas. 
+# NOTA: Use strings para valores decimais, para evitar erros de arredondamento
+# Por exemplo:
+# max_amount_willing_to_lose_grt = "0.1"
max_amount_willing_to_lose_grt = 20

[tap.sender_aggregator_endpoints]
-# Key-Value of all senders and their aggregator endpoints
-# This one below is for the E&N testnet gateway for example.
+# Valor-Chave de todos os remetentes e seus endpoints agregadores
+# Por exemplo, o abaixo é para a ponte de ligação do testnet Edge & Node.
0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com"
```

Notas:

-- Values for `tap.sender_aggregator_endpoints` can be found in the [gateway section](/indexing/tap/#gateway).
-- Values for `blockchain.receipts_verifier_address` must be used accordingly to the [Blockchain addresses section](/indexing/tap/#contracts) using the appropriate chain id.
+- Os valores de `tap.sender_aggregator_endpoints` estão na seção sobre a [porta de ligação](/indexing/tap/#gateway).
+- Os valores de `blockchain.receipts_verifier_address` devem ser usados conforme a [seção sobre endereços de blockchain](/indexing/tap/#contracts), usando a ID de chain apropriada.

**Nível de Log**

-- You can set the log level by using the `RUST_LOG` environment variable.
-- It’s recommended that you set it to `RUST_LOG=indexer_tap_agent=debug,info`.
+- Os níveis de log podem ser ajustados pela variável de ambiente `RUST_LOG`.
+- Recomendamos configurar para `RUST_LOG=indexer_tap_agent=debug,info`.

## Monitoração

### Métricas

-All components expose the port 7300 to be queried by prometheus.
+Todos os componentes expõem a porta 7300 a queries do Prometheus.

-### Grafana Dashboard
+### Painéis de Controle com Grafana

-You can download [Grafana Dashboard](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) and import.
+O [painel de controle do Grafana](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) pode ser baixado e importado.

-### Launchpad
+### Launchpad

-Currently, there is a WIP version of `indexer-rs` and `tap-agent` that can be found [here](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer)
+Atualmente, há uma versão em desenvolvimento (WIP) do `indexer-rs` e do `tap-agent` [aqui](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer).

From ab37dfb84fbb08204b09b819acd6ce801af56c02 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:10 -0500
Subject: [PATCH 0102/1534] New translations tap.mdx (Russian)

---
 website/src/pages/ru/indexing/tap.mdx | 182 +++++++++++++------------
 1 file changed, 91 insertions(+), 91 deletions(-)

diff --git a/website/src/pages/ru/indexing/tap.mdx b/website/src/pages/ru/indexing/tap.mdx
index bcc2ed4535b8..754c916ca348 100644
--- a/website/src/pages/ru/indexing/tap.mdx
+++ b/website/src/pages/ru/indexing/tap.mdx
@@ -1,112 +1,112 @@
---
-title: TAP Migration Guide
+title: Руководство по миграции TAP
---

-Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust.
+Узнайте о новой платежной системе The Graph, **Timeline Aggregation Protocol, TAP**. Эта система обеспечивает быстрые и эффективные микротранзакции с минимальным уровнем доверия.
## Обзор -[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: +[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) — это полная замена существующей в настоящее время платежной системы Scalar. Она предоставляет следующие ключевые функции: -- Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. -- Allows Indexers control of receipts and payments, guaranteeing payment for queries. -- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. +- Эффективно обрабатывает микроплатежи. +- Добавляет уровень консолидации к транзакциям и затратам ончейна. +- Позволяет Индексаторам управлять поступлениями и платежами, гарантируя оплату запросов. +- Обеспечивает децентрализованные, не требующие доверия шлюзы и повышает производительность `indexer-service` для нескольких отправителей. ## Специфические особенности -TAP allows a sender to make multiple payments to a receiver, **TAP Receipts**, which aggregates these payments into a single payment, a **Receipt Aggregate Voucher**, also known as a **RAV**. This aggregated payment can then be verified on the blockchain, reducing the number of transactions and simplifying the payment process. +TAP позволяет отправителю совершать несколько платежей получателю, **TAP Receipts**, который объединяет эти платежи в один платеж, **Receipt Aggregate Voucher**, также известный как **RAV**. Затем этот агрегированный платеж можно проверить в блокчейне, что сокращает количество транзакций и упрощает процесс оплаты. -For each query, the gateway will send you a `signed receipt` that is stored on your database. Then, these queries will be aggregated by a `tap-agent` through a request. Afterwards, you’ll receive a RAV. You can update a RAV by sending it with newer receipts and this will generate a new RAV with an increased value. +Для каждого запроса шлюз отправит вам `signed receipt`, который будет сохранен в Вашей базе данных. Затем эти запросы будут агрегированы `tap-agent` через запрос. После этого Вы получите RAV. Вы можете обновить RAV, отправив ему новые квитанции, что приведет к генерации нового RAV с увеличенным значением. -### RAV Details +### Подробнее о RAV -- It’s money that is waiting to be sent to the blockchain. +- Это денежные средства, которые ждут отправки в блокчейн. -- It will continue to send requests to aggregate and ensure that the total value of non-aggregated receipts does not exceed the `amount willing to lose`. +- Он будет продолжать отправлять запросы на агрегирование и гарантировать, что общая стоимость неагрегированных поступлений не превысит `amount willing to lose` (сумму, которую мы готовы потерять). -- Each RAV can be redeemed once in the contracts, which is why they are sent after the allocation is closed. +- Каждый RAV можно выкупить один раз в контрактах, поэтому они отправляются после закрытия аллокации. -### Redeeming RAV +### Выкуп RAV -As long as you run `tap-agent` and `indexer-agent`, everything will be executed automatically. The following provides a detailed breakdown of the process: +При запуске `tap-agent` и `indexer-agent` все будет выполняться автоматически. Ниже приводится подробное описание процесса: -1. An Indexer closes allocation. +1. Индексатор закрывает аллокацию. -2. 
` period, tap-agent` takes all pending receipts for that specific allocation and requests an aggregation into a RAV, marking it as `last`. +2. ` period, tap-agent` принимает все ожидающие поступления для этой конкретной аллокации и запрашивает агрегацию в RAV, помечая её как `last`. -3. `indexer-agent` takes all the last RAVS and sends redeem requests to the blockchain, which will update the value of `redeem_at`. +3. `indexer-agent` берет все последние RAVS и отправляет запросы на выкуп в блокчейн, который обновит значение `redeem_at`. -4. During the `` period, `indexer-agent` monitors if the blockchain has any reorganizations that revert the transaction. +4. В течение периода `` `indexer-agent` отслеживает, происходят ли в блокчейне какие-либо реорганизации, которые отменяют транзакцию. - - If it was reverted, the RAV is resent to the blockchain. If it was not reverted, it gets marked as `final`. + - Если он был отменён, RAV повторно отправляется в блокчейн. Если он не был отменён, он помечается как `final`. -## Blockchain Addresses +## Адреса блокчейна -### Contracts +### Контракты -| Contract | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | -| ------------------- | -------------------------------------------- | -------------------------------------------- | -| TAP Verifier | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | -| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | -| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | +| Контракт | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | +| ---------------------- | -------------------------------------------- | -------------------------------------------- | +| TAP-верификатор | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | -### Gateway +### Шлюз -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | -| ---------- | --------------------------------------------- | --------------------------------------------- | -| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| Компонент | Edge и Node Mainnet (Arbitrum Mainnet) | Edge и Node Testnet (Arbitrum Sepolia) | +| --------------- | --------------------------------------------- | --------------------------------------------- | +| Отправитель | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Подписанты | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Агрегатор | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | ### Требования -In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. 
+Помимо типичных требований для запуска индексатора Вам понадобится конечная точка `tap-escrow-subgraph` для запроса обновлений TAP. Вы можете использовать The Graph Network для запросов или разместить его на своей `graph-node`.

-- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
-- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1)
+- [Субграф Graph TAP Arbitrum Sepolia (для тестовой сети The Graph)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
+- [Субграф Graph TAP Arbitrum One (для основной сети The Graph)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1)

-> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually.
+> Примечание: `indexer-agent` в настоящее время не обрабатывает индексирование этого субграфа, как это происходит при развертывании сетевого субграфа. В итоге Вам придется индексировать его вручную.

-## Migration Guide
+## Руководство по миграции

-### Software versions
+### Версии программного обеспечения

-The required software version can be found [here](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases).
+Требуемую версию программного обеспечения можно найти [здесь](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases).

-### Steps
+### Шаги

1. **Indexer Agent**

-   - Follow the [same process](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components).
-   - Give the new argument `--tap-subgraph-endpoint` to activate the new TAP codepaths and enable redeeming of TAP RAVs.
+   - Следуйте [этому же процессу](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components).
+   - Укажите новый аргумент `--tap-subgraph-endpoint`, чтобы активировать новые кодовые пути TAP и разрешить выкуп TAP RAV.

2. **Indexer Service**

-   - Fully replace your current configuration with the [new Indexer Service rs](https://github.com/graphprotocol/indexer-rs). It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).
-   - Like the older version, you can scale Indexer Service horizontally easily. It is still stateless.
+   - Полностью замените текущую конфигурацию на [новую службу Индексатора rs](https://github.com/graphprotocol/indexer-rs). Рекомендуется использовать [образ контейнера](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).
+   - Как и в старой версии, Вы можете легко масштабировать Indexer Service горизонтально. Он по-прежнему не хранит состояние.

3. **TAP Agent**

-   - Run _one_ single instance of [TAP Agent](https://github.com/graphprotocol/indexer-rs) at all times. It's recommend that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).
+   - Всегда запускайте _один_ единственный экземпляр [TAP Agent](https://github.com/graphprotocol/indexer-rs). Рекомендуется использовать [образ контейнера](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs).

-4. **Configure Indexer Service and TAP Agent**
+4. **Настройка Indexer Service и TAP Agent**

-   Configuration is a TOML file shared between `indexer-service` and `tap-agent`, supplied with the argument `--config /path/to/config.toml`.
+   Конфигурация представляет собой файл TOML, совместно используемый `indexer-service` и `tap-agent`, снабженный аргументом `--config /path/to/config.toml`.

-   Check out the full [configuration](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) and the [default values](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml)
+   Ознакомьтесь с полной [конфигурацией](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) и [значениями по умолчанию](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml)

-For minimal configuration, use the following template:
+Для минимальной конфигурации используйте следующий шаблон:

```bash
-# You will have to change *all* the values below to match your setup.
+# Вам придется изменить *все* приведенные ниже значения, чтобы они соответствовали вашим настройкам.
#
-# Some of the config below are global graph network values, which you can find here:
+# Некоторые из приведенных ниже конфигураций представляют собой глобальные значения graph network, которые Вы можете найти здесь:
#
#
-# Pro tip: if you need to load some values from the environment into this config, you
-# can overwrite with environment variables. For example, the following can be replaced
-# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`:
+# Совет профессионала: если Вам нужно загрузить некоторые значения из среды в эту конфигурацию, Вы
+# можете перезаписать их переменными среды. Например, следующее можно заменить
+# на [PREFIX]_DATABASE_POSTGRESURL, где PREFIX может быть `INDEXER_SERVICE` или `TAP_AGENT`:
#
# [database]
# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0"

[indexer]
indexer_address = "0x1111111111111111111111111111111111111111"
operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane"

[database]
-# The URL of the Postgres database used for the indexer components. The same database
-# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create
-# the necessary tables.
+# URL-адрес базы данных Postgres, используемой для компонентов индексатора. Та же база данных,
+# которая используется `indexer-agent`. Ожидается, что `indexer-agent` создаст
+# необходимые таблицы.
postgres_url = "postgres://postgres@postgres:5432/postgres"

[graph_node]
-# URL to your graph-node's query endpoint
+# URL-адрес конечной точки запроса Вашей graph-node
query_url = ""
-# URL to your graph-node's status endpoint
+# URL-адрес конечной точки статуса Вашей graph-node
status_url = ""

[subgraphs.network]
-# Query URL for the Graph Network subgraph.
+# URL-адрес запроса для субграфа Graph Network.
query_url = ""
-# Optional, deployment to look for in the local `graph-node`, if locally indexed.
-# Locally indexing the subgraph is recommended.
-# NOTE: Use `query_url` or `deployment_id` only
+# Необязательно: развертывание, которое нужно искать в локальной `graph-node`, если оно локально проиндексировано.
+# Рекомендуется индексировать субграф локально.
+# ПРИМЕЧАНИЕ: используйте только `query_url` или `deployment_id`
deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

[subgraphs.escrow]
-# Query URL for the Escrow subgraph.
+# URL-адрес запроса для субграфа Escrow.
query_url = ""
-# Optional, deployment to look for in the local `graph-node`, if locally indexed.
-# Locally indexing the subgraph is recommended.
-# NOTE: Use `query_url` or `deployment_id` only
+# Необязательно, развертывание нужно искать в локальной `graph-node`, если оно локально проиндексировано.
+# Рекомендуется индексировать субграф локально.
+# ПРИМЕЧАНИЕ: используйте только `query_url` или `deployment_id`
deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

[blockchain]
-# The chain ID of the network that the graph network is running on
+# Идентификатор чейна (chain ID) сети, в которой работает the graph network
chain_id = 1337
-# Contract address of TAP's receipt aggregate voucher (RAV) verifier.
+# Контрактный адрес верификатора receipt aggregate voucher (RAV) TAP
receipts_verifier_address = "0x2222222222222222222222222222222222222222"

########################################
-# Specific configurations to tap-agent #
+# Специальные настройки для tap-agent #
########################################
[tap]
-# This is the amount of fees you are willing to risk at any given time. For ex.
-# if the sender stops supplying RAVs for long enough and the fees exceed this
-# amount, the indexer-service will stop accepting queries from the sender
-# until the fees are aggregated.
-# NOTE: Use strings for decimal values to prevent rounding errors
-# e.g:
+# Это сумма комиссий, которой Вы готовы рискнуть в любой момент времени. Например,
+# если отправитель достаточно долго не предоставляет RAV и комиссии в сумме превышают
+# это значение, indexer-service перестанет принимать запросы от отправителя
+# до тех пор, пока комиссии не будут агрегированы.
+# ПРИМЕЧАНИЕ: Используйте строки для десятичных значений, чтобы избежать ошибок округления
+# например:
# max_amount_willing_to_lose_grt = "0.1"
max_amount_willing_to_lose_grt = 20

[tap.sender_aggregator_endpoints]
-# Key-Value of all senders and their aggregator endpoints
-# This one below is for the E&N testnet gateway for example.
+# Ключ-значение всех отправителей и их конечных точек агрегатора
+# Ниже приведен пример шлюза тестовой сети E&N.
0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com"
```

Примечания:

-- Values for `tap.sender_aggregator_endpoints` can be found in the [gateway section](/indexing/tap/#gateway).
-- Values for `blockchain.receipts_verifier_address` must be used accordingly to the [Blockchain addresses section](/indexing/tap/#contracts) using the appropriate chain id.
+- Значения для `tap.sender_aggregator_endpoints` можно найти в [разделе шлюза](/indexing/tap/#gateway).
+- Значения для `blockchain.receipts_verifier_address` должны использоваться в соответствии с [разделом Адреса блокчейна](/indexing/tap/#contracts) с использованием соответствующего идентификатора чейна.

-**Log Level**
+**Уровень лога**

-- You can set the log level by using the `RUST_LOG` environment variable.
-- It’s recommended that you set it to `RUST_LOG=indexer_tap_agent=debug,info`.
+- Вы можете установить уровень лога, используя переменную среды `RUST_LOG`.
+- Рекомендуется установить для него значение `RUST_LOG=indexer_tap_agent=debug,info`.

-## Monitoring
+## Мониторинг

-### Metrics
+### Метрики

-All components expose the port 7300 to be queried by prometheus.
+Все компоненты предоставляют порт 7300 для запроса Prometheus.
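Для наглядности ниже приведен небольшой набросок проверки того, что метрики действительно отдаются. Предполагается, что компонент запущен локально и порт 7300 по умолчанию не переопределен в Вашей конфигурации; адрес `localhost` здесь условный:

```bash
# Набросок: запрашиваем метрики Prometheus у локально запущенного компонента.
# Предположение: порт 7300 по умолчанию не был переопределен.
curl -s http://localhost:7300/metrics | head -n 20
```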
-### Grafana Dashboard
+### Дашборд Grafana

-You can download [Grafana Dashboard](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) and import.
+Вы можете скачать [дашборд Grafana](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) и импортировать его.

-### Launchpad
+### Launchpad

-Currently, there is a WIP version of `indexer-rs` and `tap-agent` that can be found [here](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer)
+В настоящее время существует WIP-версия `indexer-rs` и `tap-agent`, которую можно найти [здесь](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer)

From 1faf6b1c519b8a079a382da4d1a25a4fc0c9649c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= 
Date: Fri, 14 Feb 2025 12:45:11 -0500
Subject: [PATCH 0103/1534] New translations tap.mdx (Swedish)

---
 website/src/pages/sv/indexing/tap.mdx | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/website/src/pages/sv/indexing/tap.mdx b/website/src/pages/sv/indexing/tap.mdx
index bd539c8dfb33..d69cb7b5bc91 100644
--- a/website/src/pages/sv/indexing/tap.mdx
+++ b/website/src/pages/sv/indexing/tap.mdx
@@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T
 [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features:

 - Efficiently handles micropayments.
-- Adds a layer of consolidations to on-chain transactions and costs.
+- Adds a layer of consolidations to onchain transactions and costs.
 - Allows Indexers control of receipts and payments, guaranteeing payment for queries.
 - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders.

@@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed

 ### Gateway

-| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) |
+| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) |
 | ---------- | --------------------------------------------- | --------------------------------------------- |
 | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` |
 | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` |
 | Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` |
@@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed

 In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`.

-- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
+- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD)
 - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1)

-> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually.
+> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. ## Migration Guide From a98bfbc7e6d1e83a0fb766604a1cf5ee80c49e62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:12 -0500 Subject: [PATCH 0104/1534] New translations tap.mdx (Turkish) --- website/src/pages/tr/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/tr/indexing/tap.mdx b/website/src/pages/tr/indexing/tap.mdx index 8fd44a6693de..5ad4f2dc020e 100644 --- a/website/src/pages/tr/indexing/tap.mdx +++ b/website/src/pages/tr/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
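As a rough sketch of what indexing it manually can involve, the subgraph can be created and deployed through graph-node's admin JSON-RPC endpoint. The port 8020, the subgraph name, and the `Qm...` hash below are placeholders to adapt to your setup, not the real TAP escrow deployment ID:

```bash
# Sketch only: register a name and deploy a subgraph via graph-node's admin endpoint.
curl -s http://localhost:8020 -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":"1","method":"subgraph_create","params":{"name":"graph/tap-escrow"}}'
curl -s http://localhost:8020 -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":"2","method":"subgraph_deploy","params":{"name":"graph/tap-escrow","ipfs_hash":"Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}}'
```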
## Migration Guide From e4964612083692d9cc7472f5bc269f98aeb5195e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:13 -0500 Subject: [PATCH 0105/1534] New translations tap.mdx (Ukrainian) --- website/src/pages/uk/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/uk/indexing/tap.mdx b/website/src/pages/uk/indexing/tap.mdx index 4132216ca5b6..3bab672ab211 100644 --- a/website/src/pages/uk/indexing/tap.mdx +++ b/website/src/pages/uk/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
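Once deployed, indexing progress can be checked against graph-node's indexing status API. This is a sketch assuming the default status port 8030 and a placeholder deployment hash:

```bash
# Sketch: query the indexing status API for a manually indexed deployment.
curl -s http://localhost:8030/graphql -H 'Content-Type: application/json' \
  -d '{"query":"{ indexingStatuses(subgraphs: [\"Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"]) { synced health chains { latestBlock { number } } } }"}'
```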
## Migration Guide From 545f0194338cf9ab28aa4bc0a0676ce916011faa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:14 -0500 Subject: [PATCH 0106/1534] New translations tap.mdx (Chinese Simplified) --- website/src/pages/zh/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/zh/indexing/tap.mdx b/website/src/pages/zh/indexing/tap.mdx index a93fe0ae6937..de09d72fa74a 100644 --- a/website/src/pages/zh/indexing/tap.mdx +++ b/website/src/pages/zh/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
## Migration Guide From 1977d30e89041a05b2be33389835dbd23949fe13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:15 -0500 Subject: [PATCH 0107/1534] New translations tap.mdx (Urdu (Pakistan)) --- website/src/pages/ur/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ur/indexing/tap.mdx b/website/src/pages/ur/indexing/tap.mdx index 543e1646070a..227fbfc0593f 100644 --- a/website/src/pages/ur/indexing/tap.mdx +++ b/website/src/pages/ur/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
## Migration Guide From 7247a275b8a01434bb5da3a6cea6e629f5c215eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:16 -0500 Subject: [PATCH 0108/1534] New translations tap.mdx (Vietnamese) --- website/src/pages/vi/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/vi/indexing/tap.mdx b/website/src/pages/vi/indexing/tap.mdx index f4536d331674..eccf6efc1d41 100644 --- a/website/src/pages/vi/indexing/tap.mdx +++ b/website/src/pages/vi/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
## Migration Guide From a84ce317a86b57f574e14604c40add1a8121dd0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:18 -0500 Subject: [PATCH 0109/1534] New translations tap.mdx (Marathi) --- website/src/pages/mr/indexing/tap.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/mr/indexing/tap.mdx b/website/src/pages/mr/indexing/tap.mdx index e1c1886f69a6..f6248123d886 100644 --- a/website/src/pages/mr/indexing/tap.mdx +++ b/website/src/pages/mr/indexing/tap.mdx @@ -9,7 +9,7 @@ Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, T [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: - Efficiently handles micropayments. -- Adds a layer of consolidations to on-chain transactions and costs. +- Adds a layer of consolidations to onchain transactions and costs. - Allows Indexers control of receipts and payments, guaranteeing payment for queries. - It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. @@ -53,7 +53,7 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed ### Gateway -| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| Component | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | | ---------- | --------------------------------------------- | --------------------------------------------- | | Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | | Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | @@ -63,10 +63,10 @@ As long as you run `tap-agent` and `indexer-agent`, everything will be executed In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployment. As a result, you have to index it manually. 
## Migration Guide From babd57c5c62d431116a59fca418bed88f792b22c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:19 -0500 Subject: [PATCH 0110/1534] New translations tap.mdx (Hindi) --- website/src/pages/hi/indexing/tap.mdx | 97 ++++++++++++++------------- 1 file changed, 49 insertions(+), 48 deletions(-) diff --git a/website/src/pages/hi/indexing/tap.mdx b/website/src/pages/hi/indexing/tap.mdx index dc46d2047b0a..68567f92acc5 100644 --- a/website/src/pages/hi/indexing/tap.mdx +++ b/website/src/pages/hi/indexing/tap.mdx @@ -9,7 +9,7 @@ The Graph के नए भुगतान प्रणाली, Timeline Aggre [TAP](https://docs.rs/tap_core/latest/tap_core/index.html) मौजूदा Scalar भुगतान प्रणाली का एक ड्रॉप-इन प्रतिस्थापन है। यह निम्नलिखित प्रमुख सुविधाएँ प्रदान करता है: - सूक्ष्म भुगतानों को कुशलता से संभालता है। -- ऑन-चेन लेनदेन और लागतों में समेकनों की एक परत जोड़ता है। +- ऑनचेन लेनदेन और लागतों में समेकन की एक परत जोड़ता है। - प्राप्तियों और भुगतान पर Indexers को नियंत्रण की अनुमति देता है, प्रश्नों के लिए भुगतान की गारंटी देता है। - यह विकेन्द्रीकृत, विश्वास रहित गेटवे को सक्षम बनाता है और कई भेजने वालों के लिए indexer-service के प्रदर्शन में सुधार करता है। @@ -51,28 +51,28 @@ TAP एक प्रेषक को एक प्राप्तकर्ता | AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | | Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | -### गेटवे +### गेटवे -| घटक | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | -| ---------------- | --------------------------------------------- | --------------------------------------------- | -| प्रेषक | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | -| हस्ताक्षरकर्ता | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | -| संकेन्द्रीयकर्ता | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | +| घटक | Edge and Node Mainnet (Arbitrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| ----------------- | --------------------------------------------- | --------------------------------------------- | +| प्रेषक | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| हस्ताक्षरकर्ता | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| संकेन्द्रीयकर्ता | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | ### Requirements एक Indexer चलाने की सामान्य आवश्यकताओं के अलावा, आपको TAP अपडेट को क्वेरी करने के लिए एक tap-escrow-subgraph एंडपॉइंट की आवश्यकता होगी। आप TAP को क्वेरी करने के लिए The Graph Network का उपयोग कर सकते हैं या अपने graph-node पर स्वयं होस्ट कर सकते हैं। -- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum Sepolia subgraph (The Graph टेस्टनेट के लिए)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) - [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) -> टिप्पणी:indexer-agent वर्तमान में इस subgraph की indexing को वैसे हैंडल नहीं करता जैसे वह network subgraph deployment के लिए करता है। इसके परिणामस्वरूप, आपको इसे मैन्युअली इंडेक्स करना होगा। +> नोट: `indexer-agent` वर्तमान में इस 
subgraph का indexing नेटवर्क subgraph डिप्लॉयमेंट की तरह नहीं करता है। इसके परिणामस्वरूप, आपको इसे मैन्युअल रूप से इंडेक्स करना होगा।

## माइग्रेशन गाइड

### सॉफ़्टवेयर संस्करण

आवश्यक सॉफ़्टवेयर संस्करण [यहां](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases) पाया जा सकता है।

### कदम

कम से कम कॉन्फ़िगरेशन के लिए, निम्नलिखित टेम्पलेट का उपयोग करें:

```bash
# आपको नीचे दिए गए *सभी* मान अपनी सेटअप के अनुसार बदलने होंगे।
# नीचे दिए गए कुछ कॉन्फ़िग वैल्यू ग्लोबल ग्राफ नेटवर्क वैल्यू हैं, जिन्हें आप यहां पा सकते हैं:
#

#
# प्रो टिप: यदि आपको इस कॉन्फ़िग में कुछ मान environment से लोड करने की आवश्यकता है, तो आप environment वेरिएबल्स का उपयोग करके ओवरराइट कर सकते हैं। उदाहरण के लिए, निम्नलिखित को [PREFIX]_DATABASE_POSTGRESURL से बदला जा सकता है, जहां PREFIX `INDEXER_SERVICE` या `TAP_AGENT` हो सकता है:
# [database]
# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0"
[indexer]
indexer_address = "0x1111111111111111111111111111111111111111"
operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane"

[database]
# Postgres डेटाबेस का URL जो indexer components के लिए उपयोग किया जाता है। वही डेटाबेस
# जो indexer-agent द्वारा उपयोग किया जाता है। यह अपेक्षित है कि indexer-agent आवश्यक तालिकाएं बनाएगा।
postgres_url = "postgres://postgres@postgres:5432/postgres"

[graph_node]
# आपके graph-node के क्वेरी एंडपॉइंट का URL
query_url = ""

# आपके graph-node के स्टेटस एंडपॉइंट का URL
status_url = ""

[subgraphs.network]
# Graph Network subgraph के लिए क्वेरी URL।
query_url = ""

# वैकल्पिक, local graph-node में देखने के लिए deployment, यदि स्थानीय रूप से इंडेक्स किया गया है।
# subgraph को स्थानीय रूप से इंडेक्स करना अनुशंसित है।
# नोट: केवल query_url या deployment_id का उपयोग करें
deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

[subgraphs.escrow]
# Escrow subgraph के लिए क्वेरी URL।
query_url = ""

# वैकल्पिक, local graph-node में देखने के लिए deployment, यदि स्थानीय रूप से इंडेक्स किया गया है।
# subgraph को स्थानीय रूप से इंडेक्स करना अनुशंसित है।
# नोट: केवल query_url या deployment_id का उपयोग करें
deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

[blockchain]
# उस नेटवर्क का chain ID जिस पर graph network चल रहा है
chain_id = 1337

# TAP के receipt aggregate voucher (RAV) verifier का कॉन्ट्रैक्ट एड्रेस।
receipts_verifier_address = "0x2222222222222222222222222222222222222222"

########################################
# tap-agent के लिए विशिष्ट कॉन्फ़िगरेशन #
########################################

[tap]
# यह वह फीस की मात्रा है जिसे आप किसी भी समय जोखिम में डालने के लिए तैयार हैं। उदाहरण के लिए,
# यदि sender लंबे समय तक RAVs प्रदान करना बंद कर देता है और फीस इस
# राशि से अधिक हो जाती है, तो indexer-service sender से क्वेरी स्वीकार करना बंद कर देगा
# जब तक कि फीस को समेकित नहीं किया जाता।
# नोट: राउंडिंग त्रुटियों से बचने के लिए दशमलव मानों के लिए strings का उपयोग करें
# जैसे:
# max_amount_willing_to_lose_grt = "0.1"
max_amount_willing_to_lose_grt = 20

[tap.sender_aggregator_endpoints]
# सभी senders और उनके aggregator endpoints के key-value
# नीचे दिया गया यह उदाहरण E&N टेस्टनेट गेटवे के लिए है।
0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com"
```

टिप्पणियाँ:

- tap.sender_aggregator_endpoints के लिए मान gateway section (/indexing/tap/#gateway). 
में पाए जा सकते हैं। -- blockchain.receipts_verifier_address के लिए मानों का उपयोग उचित तरीके से Blockchain addresses section (/indexing/tap/#contracts) के अनुसार किया जाना चाहिए, जिसमें उचित चेन आईडी का उपयोग किया गया है। +- tap.sender_aggregator_endpoints के लिए मान gateway section(/indexing/tap/#gateway) में पाए जा सकते हैं। +- Blockchain.receipts_verifier_address के लिए मानों का उपयोग ब्लॉकचेन एड्रेस सेक्शन(/indexing/tap/#contracts) के अनुसार सही चेन आईडी का उपयोग करते हुए किया जाना चाहिए। **लॉग स्तर** @@ -186,7 +187,7 @@ max_amount_willing_to_lose_grt = 20 ### Grafana डैशबोर्ड -आप Grafana Dashboard (https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) डाउनलोड कर सकते हैं और इम्पोर्ट कर सकते हैं। +आप Grafana Dashboard (https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) डाउनलोड कर सकते हैं और इम्पोर्ट कर सकते हैं। ### लॉन्चपैड From 8f907ca32f85ab2fbdaa0b0499a4e5f3c35a44b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:20 -0500 Subject: [PATCH 0111/1534] New translations graph-node.mdx (Romanian) --- .../pages/ro/indexing/tooling/graph-node.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/ro/indexing/tooling/graph-node.mdx b/website/src/pages/ro/indexing/tooling/graph-node.mdx index dbbfcd5fc545..6a27301b680b 100644 --- a/website/src/pages/ro/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ro/indexing/tooling/graph-node.mdx @@ -1,5 +1,5 @@ --- -title: Operating Graph Node +title: Graph Node --- Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. @@ -245,7 +245,7 @@ The indexer repository provides an [example Grafana configuration](https://githu The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Working with subgraphs @@ -287,7 +287,7 @@ During indexing subgraphs might fail, if they encounter data that is unexpected, In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache From 6577b7660366d81cba6891b0d560c9b780a79f7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:21 -0500 Subject: [PATCH 0112/1534] New translations graph-node.mdx (French) --- .../pages/fr/indexing/tooling/graph-node.mdx | 122 +++++++++--------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/website/src/pages/fr/indexing/tooling/graph-node.mdx b/website/src/pages/fr/indexing/tooling/graph-node.mdx index 252894e113a0..aa1e50675896 100644 --- a/website/src/pages/fr/indexing/tooling/graph-node.mdx +++ b/website/src/pages/fr/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: Nœud de graph opérationnel +title: Nœud de The Graph --- Graph Node est le composant qui indexe les subgraphs et rend les données résultantes disponibles pour interrogation via une API GraphQL. En tant que tel, il est au cœur de la pile de l’indexeur, et le bon fonctionnement de Graph Node est crucial pour exécuter un indexeur réussi. -Cela fournit un aperçu contextuel de Graph Node et de certaines des options les plus avancées disponibles pour les indexeurs. Une documentation et des instructions détaillées sont disponibles dans le [référentiel Graph Node](https://github.com/graphprotocol/graph-node). 
+Ceci fournit un aperçu contextuel de Graph Node et de certaines des options les plus avancées disponibles pour les Indexeurs. Une documentation et des instructions détaillées peuvent être trouvées dans le dépôt [Graph Node](https://github.com/graphprotocol/graph-node).

## Nœud de The Graph

[Graph Node](https://github.com/graphprotocol/graph-node) est l'implémentation de référence pour l'indexation des subgraphs sur The Graph Network, la connexion aux clients de la blockchain, l'indexation des subgraphs et la mise à disposition des données indexées pour les requêtes.

Graph Node (et l'ensemble de la pile de l'indexeur) peut être exécuté sur serveur dédié (bare metal) ou dans un environnement cloud. Cette souplesse du composant central d'indexation est essentielle à la solidité du protocole The Graph. De même, Graph Node peut être [compilé à partir du code source](https://github.com/graphprotocol/graph-node), ou les Indexeurs peuvent utiliser l'une des [images Docker fournies](https://hub.docker.com/r/graphprotocol/graph-node).

### PostgreSQL database

Le magasin principal du nœud de graph, c'est là que les données des sous-graphes sont stockées, ainsi que les métadonnées sur les sous-graphes et les données de réseau indépendantes des sous-graphes, telles que le cache de blocs et le cache eth_call.

### Clients réseau

Pour indexer un réseau, Graph Node doit avoir accès à un client réseau via une API JSON-RPC compatible avec EVM. Cette RPC peut se connecter à un seul client ou à une configuration plus complexe qui équilibre la charge entre plusieurs clients.

Alors que certains subgraphs peuvent ne nécessiter qu'un nœud complet, d'autres peuvent avoir des caractéristiques d'indexation qui nécessitent des fonctionnalités RPC supplémentaires. En particulier, les subgraphs qui font des `eth_calls` dans le cadre de l'indexation nécessiteront un nœud d'archive qui supporte [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), et les subgraphs avec des `callHandlers`, ou des `blockHandlers` avec un filtre `call`, nécessitent le support de `trace_filter` ([voir la documentation du module trace ici](https://openethereum.github.io/JSONRPC-trace-module)). 
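À titre d'illustration, voici une esquisse (et non un extrait de la documentation officielle) permettant de vérifier qu'un fournisseur expose bien ces capacités ; `$RPC_URL` et le hash de bloc sont des valeurs fictives à adapter :

```bash
# Esquisse : vérifier la prise en charge de trace_filter par le fournisseur.
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"trace_filter","params":[{"fromBlock":"0x1","toBlock":"0x2"}]}'

# Esquisse : eth_call adressé par hash de bloc, au sens de l'EIP-1898 ;
# un nœud d'archive est généralement requis pour les blocs anciens.
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":2,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000000","data":"0x"},{"blockHash":"0x<hash-de-bloc>"}]}'
```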
-**Network Firehoses** : un Firehose est un service gRPC fournissant un flux de blocs ordonné, mais compatible avec les fork, développé par les principaux développeurs de The Graph pour mieux prendre en charge une indexation performante à grande échelle. Ce n'est pas actuellement une exigence de l'indexeur, mais les indexeurs sont encouragés à se familiariser avec la technologie, avant la prise en charge complète du réseau. Apprenez-en davantage sur le Firehose [ici](https://firehose.streamingfast.io/). +\*\*Network Firehoses : un Firehose est un service gRPC fournissant un flux de blocs ordonné, mais compatible avec les fork, développé par les principaux développeurs de The Graph pour mieux prendre en charge une indexation performante à l'échelle. Il ne s'agit pas actuellement d'une exigence de l'Indexeur, mais les Indexeurs sont encouragés à se familiariser avec la technologie, en avance sur la prise en charge complète du réseau. Pour en savoir plus sur le Firehose [ici](https://firehose.streamingfast.io/). ### Nœuds IPFS @@ -32,9 +32,9 @@ Les métadonnées de déploiement de subgraphs sont stockées sur le réseau IPF Pour activer la surveillance et la création de rapports, Graph Node peut éventuellement enregistrer les métriques sur un serveur de métriques Prometheus. -### Commencer à partir des sources +### Getting started from source -#### Installer les prérequis +#### Install prerequisites - **Rust** @@ -42,15 +42,15 @@ Pour activer la surveillance et la création de rapports, Graph Node peut évent - **IPFS** -- **Exigences supplémentaires pour les utilisateurs d'Ubuntu** - Pour exécuter un Graph Node sur Ubuntu, quelques packages supplémentaires peuvent être nécessaires. +- **Exigences supplémentaires pour les utilisateurs d'Ubuntu** - Pour faire fonctionner un Graph Node sur Ubuntu, quelques packages supplémentaires peuvent être nécessaires. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config ``` -#### Installation +#### Setup -1. Démarrez un serveur de base de données PostgreSQL +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clonez le dépôt [Graph Node](https://github.com/graphprotocol/graph-node) et créez la source en exécutant `cargo build` +2. Clonez le repo [Graph Node](https://github.com/graphprotocol/graph-node) et compilez les sources en lançant `cargo build` -3. Maintenant que toutes les dépendances sont configurées, démarrez le Graph Node : +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,56 +71,56 @@ cargo run -p graph-node --release -- \ ### Bien démarrer avec Kubernetes -Un exemple complet de configuration de Kubernetes est disponible dans le [dépôt de l'indexeur](https://github.com/graphprotocol/indexer/tree/main/k8s). +Un exemple complet de configuration Kubernetes se trouve dans le [dépôt d'Indexeur](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Ports Lorsqu'il est en cours d'exécution, Graph Node expose les ports suivants : -| Port | Objectif | Routes | Argument CLI | Variable d'environnement | -| --- | --- | --- | --- | --- | -| 8000 | Serveur HTTP GraphQL
    (pour les requêtes de subgraphs) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (pour les abonnements aux subgraphs) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (pour gérer les déploiements) | / | --admin-port | - | -| 8030 | API de statut d'indexation des subgraphs | /graphq | --index-node-port | - | -| 8040 | Métriques Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **Important** : Soyez prudent lorsque vous exposez les ports publiquement : les **ports d'administration** doivent rester verrouillés. Cela inclut le point de terminaison Graph Node JSON-RPC. +> **Important** : Soyez prudent lorsque vous exposez des ports publiquement - les **ports d'administration** doivent être verrouillés. Ceci inclut l'endpoint JSON-RPC de Graph Node. ## Configuration avancée du nœud graph Dans sa forme la plus simple, Graph Node peut être utilisé avec une seule instance de Graph Node, une seule base de données PostgreSQL, un nœud IPFS et les clients réseau selon les besoins des subgraphs à indexer. -Cette configuration peut être mise à l'échelle horizontalement, en ajoutant plusieurs nœuds graphs et plusieurs bases de données pour prendre en charge ces nœuds graphs. Les utilisateurs avancés voudront peut-être profiter de certaines des capacités de mise à l'échelle horizontale de Graph Node, ainsi que de certaines des options de configuration les plus avancées, via le fichier `config.toml` et les variables d'environnement de Graph Node. +Cette configuration peut être mise à l'échelle horizontalement, en ajoutant plusieurs Graph Nodes, et plusieurs bases de données pour supporter ces Graph Nodes. Les utilisateurs avancés voudront peut-être profiter de certaines des capacités de mise à l'échelle horizontale de Graph Node, ainsi que de certaines des options de configuration les plus avancées, via le fichier `config.toml` et les variables d'environnement de Graph Node. ### `config.toml` -Un fichier de configuration [TOML](https://toml.io/en/) peut être utilisé pour définir des configurations plus complexes que celles exposées dans la CLI. L'emplacement du fichier est transmis avec le commutateur de ligne de commande --config. +Un fichier de configuration [TOML](https://toml.io/en/) peut être utilisé pour définir des configurations plus complexes que celles exposées dans la CLI. L'emplacement du fichier est transmis avec via l’option de ligne de commande --config. > Lors de l'utilisation d'un fichier de configuration, il n'est pas possible d'utiliser les options --postgres-url, --postgres-secondary-hosts et --postgres-host-weights. -Un fichier `config.toml` minimal peut être fourni ; le fichier suivant équivaut à l'utilisation de l'option de ligne de commande --postgres-url : +Un fichier `config.toml` minimal peut être fourni ; le fichier suivant équivaut à l'utilisation de l'option --postgres-url en ligne de commande : ```toml [store] [store.primary] -connection="<.. postgres-url argument ..>" +connection="<..argument postgres-url ..>" [deployment] [[deployment.rule]] indexers = [ "<.. liste de tous les nœuds d'indexation ..>" ] ``` -La documentation complète de `config.toml` est disponible dans la [documentation Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +La documentation complète de `config.toml` peut être trouvée dans la [documentation de Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. 
in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +L'indexation Graph Node peut être mise à l'échelle horizontalement, en exécutant plusieurs instances de Graph Node pour répartir l'indexation et l'interrogation sur différents nœuds. Cela peut être fait simplement en exécutant des Graph Nodes configurés avec un `node_id` différent au démarrage (par exemple dans le fichier Docker Compose), qui peut ensuite être utilisé dans le fichier `config.toml` pour spécifier les [nœuds de requête dédiés](#dedicated-query-nodes), les [ingesteurs de blocs](#dedicated-block-ingestion) et en répartissant les subgraphs sur les nœuds avec des [règles de déploiement](#deployment-rules). > Notez que plusieurs nœuds de graph peuvent tous être configurés pour utiliser la même base de données, qui elle-même peut être mise à l'échelle horizontalement via le partitionnement. #### Règles de déploiement -Étant donné plusieurs nœuds de graph, il est nécessaire de gérer le déploiement de nouveaux subgraphs afin qu'un même subgraph ne soit pas indexé par deux nœuds différents, ce qui entraînerait des collisions. Cela peut être fait à l'aide de règles de déploiement, qui peuvent également spécifier dans quelle `partition` les données d'un subgraph doivent être stockées, si la partition de base de données est utilisée. Les règles de déploiement peuvent correspondre au nom du subgraph et au réseau que le déploiement indexe afin de prendre une décision. +Étant donné plusieurs Graph Node, il est nécessaire de gérer le déploiement de nouveaux subgraphs afin que le même subgraph ne soit pas indexé par deux nœuds différents, ce qui entraînerait des collisions. Cela peut être fait en utilisant des règles de déploiement, qui peuvent également spécifier dans quel `shard` les données d'un subgraph doivent être stockées, si le partitionnement de base de données est utilisé. Les règles de déploiement peuvent correspondre au nom du subgraph et au réseau que le déploiement indexe afin de prendre une décision. Exemple de configuration de règle de déploiement : @@ -158,7 +158,7 @@ Les nœuds peuvent être configurés pour être explicitement des nœuds de requ ```toml [general] -query = "" +query = "" ``` Tout nœud dont --node-id correspond à l'expression régulière sera configuré pour répondre uniquement aux requêtes. @@ -167,7 +167,7 @@ Tout nœud dont --node-id correspond à l'expression régulière sera configuré Pour la plupart des cas d'utilisation, une seule base de données Postgres suffit pour prendre en charge une instance de nœud graph. Lorsqu'une instance de nœud graph dépasse une seule base de données Postgres, il est possible de diviser le stockage des données de nœud graph sur plusieurs bases de données Postgres. Toutes les bases de données forment ensemble le magasin de l’instance de nœud graph. Chaque base de données individuelle est appelée une partition. -Les fragments peuvent être utilisés pour répartir les déploiements de subgraphs sur plusieurs bases de données, et peuvent également être utilisés pour utiliser des réplicas afin de répartir la charge des requêtes entre les bases de données. 
Cela inclut la configuration du nombre de connexions de base de données disponibles que chaque `nœud de graph` doit conserver dans son pool de connexions pour chaque base de données, ce qui devient de plus en plus important à mesure que de plus en plus de subgraphs sont indexés. +Les fragments peuvent être utilisés pour diviser les déploiements de subgraph sur plusieurs bases de données et peuvent également être utilisés pour faire intervenir des réplicas afin de répartir la charge de requête sur plusieurs bases de données. Cela inclut la configuration du nombre de connexions de base de données disponibles que chaque `graph-node` doit conserver dans son pool de connexions pour chaque base de données, ce qui devient de plus en plus important à mesure que davantage de subgraph sont indexés. Le partage devient utile lorsque votre base de données existante ne peut pas suivre la charge que Graph Node lui impose et lorsqu'il n'est plus possible d'augmenter la taille de la base de données. @@ -175,11 +175,11 @@ Le partage devient utile lorsque votre base de données existante ne peut pas su En termes de configuration des connexions, commencez par max_connections dans postgresql.conf défini sur 400 (ou peut-être même 200) et regardez les métriques store_connection_wait_time_ms et store_connection_checkout_count Prometheus. Des temps d'attente notables (tout ce qui dépasse 5 ms) indiquent qu'il y a trop peu de connexions disponibles ; des temps d'attente élevés seront également dus au fait que la base de données est très occupée (comme une charge CPU élevée). Cependant, si la base de données semble par ailleurs stable, des temps d'attente élevés indiquent la nécessité d'augmenter le nombre de connexions. Dans la configuration, le nombre de connexions que chaque instance de nœud graph peut utiliser constitue une limite supérieure, et Graph Node ne maintiendra pas les connexions ouvertes s'il n'en a pas besoin. -En savoir plus sur la configuration du magasin [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +En savoir plus sur la configuration du store [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### Ingestion de blocs dédiés -Si plusieurs nœuds sont configurés, il sera nécessaire de spécifier un nœud responsable de l'ingestion de nouveaux blocs, afin que tous les nœuds d'index configurés n'interrogent pas la tête de chaîne. Cela se fait dans le cadre de l'espace de noms `chains`, en spécifiant le `node_id` à utiliser pour l'ingestion de bloc : +Si plusieurs nœuds sont configurés, il sera nécessaire de spécifier un nœud responsable de l'ingestion de nouveaux blocs, afin que tous les nœuds d’indexation ne sondent pas simultanément le head de la chaîne. Cela s’effectue dans la section `chains` du fichier de configuration, en spécifiant le `node_id` à utiliser pour l'ingestion de blocs : ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### Prise en charge de plusieurs réseaux -Le protocole Graph augmente le nombre de réseaux pris en charge pour l'indexation des récompenses, et il existe de nombreux subgraphs indexant des réseaux non pris en charge qu'un indexeur aimerait traiter. Le fichier `config.toml` permet une configuration expressive et flexible de : +The Graph Protocol augmente le nombre de réseaux pris en charge pour l'indexation des récompenses, et il existe de nombreux subgraphs indexant des réseaux non pris en charge. 
Un indexeur peut choisir de les indexer malgré tout. Le fichier `config.toml` permet une configuration riche et flexible : - Plusieurs réseaux - Plusieurs fournisseurs par réseau (cela peut permettre de répartir la charge entre les fournisseurs, et peut également permettre la configuration de nœuds complets ainsi que de nœuds d'archives, Graph Node préférant les fournisseurs moins chers si une charge de travail donnée le permet). - Détails supplémentaires sur le fournisseur, tels que les fonctionnalités, l'authentification et le type de fournisseur (pour la prise en charge expérimentale de Firehose) -La section `[chains]` contrôle les fournisseurs Ethereum auxquels graph-node se connecte et où sont stockés les blocs et autres métadonnées de chaque chaîne. L'exemple suivant configure deux chaînes, mainnet et kovan, où les blocs pour le réseau principal sont stockés dans la partition vip et les blocs pour kovan sont stockés dans la partition principale. La chaîne du mainnet peut utiliser deux fournisseurs différents, alors que kovan n'a qu'un seul fournisseur. +La section `[chains]` contrôle les fournisseurs ethereum auxquels graph-node se connecte, et où les blocs et autres métadonnées pour chaque chaîne sont stockés. L'exemple suivant configure deux chaînes, mainnet et kovan, où les blocs pour mainnet sont stockés dans le shard vip et les blocs pour kovan sont stockés dans le shard primaire. La chaîne mainnet peut utiliser deux fournisseurs différents, alors que kovan n'a qu'un seul fournisseur. ```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -En savoir plus sur la configuration du fournisseur [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Consultez les informations sur la configuration des fournisseurs [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Variables d'environnement -Graph Node prend en charge une gamme de variables d'environnement qui peuvent activer des fonctionnalités ou modifier le comportement de Graph Node. Ceux-ci sont documentés [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Node supporte une série de variables d'environnement qui peuvent activer des fonctionnalités ou modifier le comportement de Graph Node. Elles sont documentées [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### Déploiement continu Les utilisateurs qui utilisent une configuration d'indexation à grande échelle avec une configuration avancée peuvent bénéficier de la gestion de leurs nœuds graph avec Kubernetes. -- Le dépôt de l'indexeur contient un [exemple de référence Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) est une boîte à outils permettant d'exécuter un indexeur de protocole graph sur Kubernetes géré par GraphOps. Il fournit un ensemble de graph Helm et une CLI pour gérer un déploiement de Graph Node. +- Le dépot de l'Indexeur a un [exemple de référence Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) est une boîte à outils pour l'exécution d'un Graph Protocol Indexer sur Kubernetes maintenue par GraphOps. Il fournit un ensemble de graphiques Helm et une CLI pour gérer le déploiement Graph Node. 
### Gestion du nœud de graph @@ -229,23 +229,23 @@ Les utilisateurs qui utilisent une configuration d'indexation à grande échelle #### Journal de bord -Les journaux de Graph Node peuvent fournir des informations utiles pour le débogage et l'optimisation de Graph Node et de subgraphs spécifiques. Graph Node prend en charge différents niveaux de journalisation via la variable d'environnement `GRAPH_LOG`, avec les niveaux suivants : erreur, avertissement, information, débogage ou trace. +Les logs de Graph Node peuvent fournir des informations utiles pour le débogage et l'optimisation de Graph Node et de subgraphs spécifiques. Graph Node supporte différents niveaux de logs via la variable d'environnement `GRAPH_LOG`, avec les niveaux suivants : error, warn, info, debug ou trace. -De plus, définir `GRAPH_LOG_QUERY_TIMING` sur `gql` fournit plus de détails sur la façon dont les requêtes GraphQL sont exécutées (bien que cela génère un grand volume de journaux). +De plus, fixer `GRAPH_LOG_QUERY_TIMING` à `gql` fournit plus de détails sur la façon dont les requêtes GraphQL s'exécutent (bien que cela génère un grand volume de logs). -#### Monitoring & alerting +#### Supervision & alertes Graph Node fournit les métriques via le point de terminaison Prometheus sur le port 8040 par défaut. Grafana peut ensuite être utilisé pour visualiser ces métriques. -Le référentiel de l'indexeur fournit un [exemple de configuration Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +Le dépôt indexer propose un [exemple de configuration Grafana] (https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman `graphman` est un outil de maintenance pour Graph Node, aidant au diagnostic et à la résolution de différentes tâches quotidiennes et exceptionnelles. -La commande graphman est incluse dans les conteneurs officiels et vous pouvez docker exec dans votre conteneur graph-node pour l'exécuter. Il nécessite un fichier `config.toml`. +La commande graphman est incluse dans les conteneurs officiels, et vous pouvez docker exec dans votre conteneur graph-node pour l'exécuter. Elle nécessite un fichier `config.toml`. -La documentation complète des commandes `graphman` est disponible dans le référentiel Graph Node. Voir \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) dans le nœud graphique `/docs` +La documentation complète des commandes `graphman` est disponible dans le dépôt Graph Node. 
Voir [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) dans le dépôt Graph Node `/docs` ### Travailler avec des subgraphs @@ -267,8 +267,8 @@ Ces étapes sont pipeline (c’est-à-dire qu’elles peuvent être exécutées Causes courantes de lenteur d’indexation : -- Temps nécessaire pour trouver les événements pertinents de la chaîne (les gestionnaires d'appels en particulier peuvent être lents, étant donné le recours à `trace_filter`) -- Effectuer un grand nombre d'`eth_calls` dans le cadre des gestionnaires +- Temps nécessaire pour trouver les événements pertinents à partir de la chaîne (les gestionnaires d'appels en particulier peuvent être lents, étant donné la dépendance à `trace_filter`) +- Faire un grand nombre d'appels `eth_call` dans les gestionnaires - Une grande quantité d'interactions avec le magasin pendant l'exécution - Une grande quantité de données à sauvegarder dans le magasin - Un grand nombre d'événements à traiter @@ -287,18 +287,18 @@ Lors de l'indexation, les subgraphs peuvent échouer s'ils rencontrent des donn Dans certains cas, un échec peut être résolu par l'indexeur (par exemple, si l'erreur est due au fait de ne pas disposer du bon type de fournisseur, l'ajout du fournisseur requis permettra de poursuivre l'indexation). Cependant, dans d'autres cas, une modification du code du subgraph est requise. -> Les échecs déterministes sont considérés comme « définitifs », avec une preuve d'indexation générée pour le bloc défaillant, tandis que les échecs non déterministes ne le sont pas, car le subgraph peut réussir à « échouer » et continuer l'indexation. Dans certains cas, l'étiquette non déterministe est incorrecte et le subgraph ne surmontera jamais l'erreur ; ces échecs doivent être signalés en tant que problèmes sur le référentiel Graph Node. +> Les défaillances déterministes sont considérés comme "final" (définitifs), avec une preuve d'indexation générée pour le bloc défaillant, alors que les défaillances non déterministes ne le sont pas, car le subgraph pourait "se rétablir " et poursuivre l'indexation. Dans certains cas, l'étiquette non déterministe est incorrecte et le subgraph ne surmontera jamais l'erreur ; de tels défaillances doivent être signalés en tant que problèmes sur le dépôt de Graph Node. #### Bloquer et appeler le cache -Graph Node met en cache certaines données dans le magasin afin d'économiser la récupération auprès du fournisseur. Les blocs sont mis en cache, tout comme les résultats de `eth_calls` (ces derniers étant mis en cache à partir d'un bloc spécifique). Cette mise en cache peut augmenter considérablement la vitesse d'indexation lors de la « resynchronisation » d'un subgraph légèrement modifié. +Graph Node met en cache certaines données dans le store afin d'éviter de les récupérer auprès du fournisseur. Les blocs sont mis en cache, ainsi que les résultats des `eth_calls` (ces derniers étant mis en cache à partir d'un bloc spécifique). Cette mise en cache peut augmenter considérablement la vitesse d'indexation lors de la « resynchronisation » d'un subgraph légèrement modifié. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. 
+Cependant, dans certains cas, si un nœud Ethereum a fourni des données incorrectes pendant une certaine période, cela peut se retrouver dans le cache, conduisant à des données incorrectes ou à des subgraphs défaillants. Dans ce cas, les Indexeurs peuvent utiliser `graphman` pour effacer le cache empoisonné, puis rembobiner les subgraph affectés, ce qui permettra de récupérer des données fraîches auprès du fournisseur (que l'on espère sain).

Si une incohérence du cache de blocs est suspectée, telle qu'un événement de réception de transmission manquant :

-1. `liste de chaînes graphman` pour trouver le nom de la chaîne.
-2. `graphman chain check-blocks <chain-name> par numéro <block-number>` vérifiera si le bloc mis en cache correspond au fournisseur et supprimera le bloc du cache si ce n'est pas le cas.
+1. `graphman chain list` pour trouver le nom de la chaîne.
+2. `graphman chain check-blocks <chain-name> by-number <block-number>` vérifiera si le bloc mis en cache correspond au fournisseur, et supprimera le bloc du cache si ce n'est pas le cas.
   1. S'il y a une différence, il peut être plus sûr de tronquer tout le cache avec `graphman chain truncate <chain-name>`.
   2. Si le bloc correspond au fournisseur, le problème peut être débogué directement auprès du fournisseur.

@@ -306,13 +306,13 @@ Une fois qu'un subgraph a été indexé, les indexeurs peuvent s'attendre à traiter les requêtes via le point de terminaison de requête dédié du subgraph. Si l'indexeur espère traiter un volume de requêtes important, un nœud de requête dédié est recommandé, et en cas de volumes de requêtes très élevés, les indexeurs peuvent souhaiter configurer des fragments de réplique afin que les requêtes n'aient pas d'impact sur le processus d'indexation.

-However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users.
+Cependant, même avec un nœud de requête et des répliques dédiés, certaines requêtes peuvent prendre beaucoup de temps à exécuter et, dans certains cas, augmenter l'utilisation de la mémoire et avoir un impact négatif sur le temps de requête des autres utilisateurs.

Il n'existe pas de solution miracle, mais une gamme d'outils permettant de prévenir, de diagnostiquer et de traiter les requêtes lentes.

##### Mise en cache des requêtes

-Graph Node met en cache les requêtes GraphQL par défaut, ce qui peut réduire considérablement la charge de la base de données. Cela peut être configuré davantage avec les paramètres `GRAPH_QUERY_CACHE_BLOCKS` et `GRAPH_QUERY_CACHE_MAX_MEM` - pour en savoir plus [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching).
+Graph Node met en cache les requêtes GraphQL par défaut, ce qui peut réduire de manière significative la charge de la base de données. Ceci peut être configuré avec les paramètres `GRAPH_QUERY_CACHE_BLOCKS` et `GRAPH_QUERY_CACHE_MAX_MEM` - en savoir plus [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching).

##### Analyser les requêtes

Les requêtes problématiques apparaissent le plus souvent de deux manières. Dans certains cas, les utilisateurs eux-mêmes signalent qu'une requête donnée est lente. Dans ce cas, le défi consiste à diagnostiquer la raison de la lenteur - qu'il s'agisse d'un problème général ou spécifique à ce subgraph ou à cette requête. Et puis, bien sûr, de le résoudre, si possible.

Dans d'autres cas, le déclencheur peut être une utilisation élevée de la mémoire sur un nœud de requête, auquel cas le défi consiste d'abord à identifier la requête à l'origine du problème.
-Les indexeurs peuvent utiliser [qlog](https://github.com/graphprotocol/qlog/) pour traiter et résumer les journaux de requêtes de Graph Node. `GRAPH_LOG_QUERY_TIMING` peut également être activé pour aider à identifier et déboguer les requêtes lentes.
+Les Indexeurs peuvent utiliser [qlog](https://github.com/graphprotocol/qlog/) pour traiter et résumer les logs de requêtes de Graph Node. `GRAPH_LOG_QUERY_TIMING` peut également être activé pour aider à identifier et déboguer les requêtes lentes.

Étant donné une requête lente, les indexeurs disposent de quelques options. Bien entendu, ils peuvent modifier leur modèle de coûts pour augmenter considérablement le coût d’envoi de la requête problématique. Cela peut entraîner une réduction de la fréquence de cette requête. Cependant, cela ne résout souvent pas la cause première du problème.

@@ -328,18 +328,18 @@ Les tables de base de données qui stockent les entités semblent généralement se décliner en deux variétés : les tables de type « transaction », où les entités, une fois créées, ne sont jamais mises à jour, c'est-à-dire qu'elles stockent quelque chose qui s'apparente à une liste de transactions financières, et les « de type compte », où les entités sont mis à jour très souvent, c'est-à-dire qu'ils stockent quelque chose comme des comptes financiers qui sont modifiés à chaque fois qu'une transaction est enregistrée. Les tables de type compte se caractérisent par le fait qu'elles contiennent un grand nombre de versions d'entités, mais relativement peu d'entités distinctes. Souvent, dans de tels tableaux, le nombre d'entités distinctes représente 1 % du nombre total de lignes (versions d'entités)

-Pour les tables de type compte, `graph-node` peut générer des requêtes qui tirent parti des détails de la façon dont Postgres finit par stocker les données avec un taux de changement si élevé, à savoir que toutes les versions des blocs récents sont en une petite sous-section du stockage global pour une telle table.
+Pour les tables de type compte, `graph-node` peut générer des requêtes qui tirent parti des détails de la façon dont Postgres stocke les données avec un taux de changement aussi élevé, à savoir que toutes les versions des blocs récents se trouvent dans une petite sous-section du stockage global d'une telle table.

-La commande `graphman stats show <sgdNNN>` indique, pour chaque type/table d'entité dans un déploiement, le nombre d'entités distinctes et le nombre de versions d'entité que chaque table contient. Ces données sont basées sur des estimations internes à Postgres et sont donc nécessairement imprécises et peuvent être erronées d'un ordre de grandeur. Un `-1` dans la colonne `entités` signifie que Postgres estime que toutes les lignes contiennent une entité distincte.
+La commande `graphman stats show <sgdNNN>` montre, pour chaque type/table d'entité dans un déploiement, combien d'entités distinctes, et combien de versions d'entités chaque table contient. Ces données sont basées sur des estimations internes à Postgres, et sont donc nécessairement imprécises, et peuvent être erronées d'un ordre de grandeur. Un `-1` dans la colonne `entités` signifie que Postgres pense que toutes les lignes contiennent une entité distincte.

-En général, les tables dans lesquelles le nombre d'entités distinctes est inférieur à 1 % du nombre total de lignes/versions d'entités sont de bons candidats pour l'optimisation de type compte. Lorsque la sortie de `graphman stats show` indique qu'une table pourrait bénéficier de cette optimisation, l'exécution de `graphman stats show <sgdNNN> <table>` effectuera un décompte complet de la table - cela peut être lent, mais donne une mesure précise du rapport entre les entités distinctes et les versions globales de l'entité.
+En général, les tables où le nombre d'entités distinctes est inférieur à 1% du nombre total de versions de lignes/d'entités sont de bons candidats pour l'optimisation de type compte. Lorsque la sortie de `graphman stats show` indique qu'une table pourrait bénéficier de cette optimisation, l'exécution de `graphman stats show <sgdNNN> <table>` effectuera un comptage complet de la table - ce qui peut être lent, mais donne une mesure précise du ratio d'entités distinctes par rapport au nombre total de versions d'entités.

-Une fois qu'il a été déterminé qu'une table ressemble à un compte, l'exécution de `graphman stats account-like <sgdNNN>.<table>` activera l'optimisation de type compte pour les requêtes sur cette table. L'optimisation peut être à nouveau désactivée avec `graphman stats account-like --clear <sgdNNN>.<table>` Il faut jusqu'à 5 minutes aux nœuds de requête pour remarquer que l'optimisation a été activée ou désactivée. . Après avoir activé l'optimisation, il est nécessaire de vérifier que le changement ne ralentit pas réellement les requêtes pour cette table. Si vous avez configuré Grafana pour surveiller Postgres, des requêtes lentes apparaîtraient dans `pg_stat_activity` en grand nombre, prenant plusieurs secondes. Dans ce cas, l'optimisation doit être à nouveau désactivée.
+Une fois qu'une table a été déterminée comme étant de type compte, l'exécution de `graphman stats account-like <sgdNNN>.<table>` activera l'optimisation de type compte pour les requêtes sur cette table. L'optimisation peut être désactivée à nouveau avec `graphman stats account-like --clear <sgdNNN>.<table>` Il faut compter jusqu'à 5 minutes pour que les noeuds de requêtes remarquent que l'optimisation a été activée ou désactivée. Après avoir activé l'optimisation, il est nécessaire de vérifier que le changement ne ralentit pas les requêtes pour cette table. Si vous avez configuré Grafana pour surveiller Postgres, les requêtes lentes apparaîtront dans `pg_stat_activity` en grand nombre, prenant plusieurs secondes. Dans ce cas, l'optimisation doit être désactivée à nouveau.

-Pour les subgraphs de type Uniswap, les tables `pair` et `token` sont les meilleurs candidats pour cette optimisation et peuvent avoir un effet considérable sur la charge de la base de données.
+Pour les subgraphs de type Uniswap, les tables `pair` et `token` sont les meilleurs candidats pour cette optimisation, et peuvent avoir un effet considérable sur la charge de la base de données.

#### Supprimer des subgraphs

> Il s'agit d'une nouvelle fonctionnalité qui sera disponible dans Graph Node 0.29.x

-À un moment donné, un indexeur souhaitera peut-être supprimer un subgraph donné. Cela peut être facilement fait via `graphman drop`, qui supprime un déploiement et toutes ses données indexées. Le déploiement peut être spécifié sous la forme d'un nom de subgraph, d'un hachage IPFS `Qm..` ou de l'espace de noms de base de données `sgdNNN`. Une documentation supplémentaire est disponible [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop).
+A un moment donné, un Indexeur peut vouloir supprimer un subgraph donné. Cela peut être facilement fait via `graphman drop`, qui supprime un déploiement et toutes ses données indexées. Le déploiement peut être spécifié soit comme un nom de subgraph, soit comme un hash IPFS `Qm..`, ou alors comme le namespace `sgdNN` de la base de données . Une documentation plus détaillée est disponible [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop).

From c6b637d42825764f684e7d6d0e6ff3489b1c8aa3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:22 -0500
Subject: [PATCH 0113/1534] New translations graph-node.mdx (Spanish)

---
 website/src/pages/es/indexing/tooling/graph-node.mdx | 120 +++++++++---------
 1 file changed, 60 insertions(+), 60 deletions(-)

diff --git a/website/src/pages/es/indexing/tooling/graph-node.mdx b/website/src/pages/es/indexing/tooling/graph-node.mdx
index 2dc80685b400..6449d0a2a8ab 100644
--- a/website/src/pages/es/indexing/tooling/graph-node.mdx
+++ b/website/src/pages/es/indexing/tooling/graph-node.mdx
@@ -1,16 +1,16 @@
 ---
-title: Operar Graph Node
+title: Graph Node
 ---

 Graph Node es el componente que indexa los subgrafos, y hace que los datos resultantes estén disponibles para su consulta a través de una API GraphQL. Como tal, es fundamental para el stack del Indexador, y el correcto funcionamiento de Graph Node es crucial para ejecutar un Indexador con éxito.

-Esto proporciona un resumen contextual de Graph Node, y algunas de las opciones más avanzadas disponibles para los Indexadores. Encontrarás documentación e instrucciones detalladas en el [Graph Node repository](https://github.com/graphprotocol/graph-node).
+This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node).

## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) es la implementación de referencia para indexar subgrafos en The Graph Network, conectarse a clientes blockchain, indexar subgrafos y hacer que los datos indexados estén disponibles para su consulta. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -Graph Node (y todo el stack del Indexador) puede ejecutarse en una máquina física dedicada o en la nube. Esta flexibilidad del componente central de indexación es crucial para la robustez de The Graph Protocol. Del mismo modo, Graph Node se puede [construir desde el código fuente](https://github.com/graphprotocol/graph-node), o los Indexadores pueden utilizar una de las [imágenes Docker proporcionadas](https://hub.docker.com/r/graphprotocol/graph-node). +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### Base de datos PostgreSQL @@ -20,7 +20,7 @@ El almacén principal para Graph Node, aquí es donde se almacenan los datos de Para indexar una red, Graph Node necesita acceso a un cliente de red a través de una API JSON-RPC compatible con EVM. Esta RPC puede conectarse a un solo cliente o puede ser una configuración más compleja que equilibre la carga entre varios clientes. -Mientras que algunos subgrafos pueden requerir solo un nodo completo, otros pueden tener features de indexación que requieren funcionalidades adicionales de RPC. Específicamente, los subgrafos que realicen `eth_calls` como parte de la indexación requerirán un nodo de archivo que admita el [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), y los subgrafos con` callHandlers` o `blockHandlers` con un filtro de `call` requerirán soporte de `trace_filter` ([consulta la documentación del módulo de trazas aquí](https://openethereum.github.io/JSONRPC-trace-module)). +While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). @@ -32,9 +32,9 @@ Los metadatos de deploy del subgrafo se almacenan en la red IPFS. El Graph Node Para permitir la supervisión y la generación de informes, Graph Node puede registrar métricas opcionalmente en un servidor de métricas Prometheus. 
-### Empezar desde el origen
+### Getting started from source

-#### Instalar requisitos previos
+#### Install prerequisites

 - **Rust**

 - **PostgreSQL**

 - **IPFS**

-- **Requisitos adicionales para usuarios de Ubuntu**: Para ejecutar un nodo Graph en Ubuntu, es posible que se necesiten algunos paquetes adicionales.
+- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed.

```sh
sudo apt-get install -y clang libpq-dev libssl-dev pkg-config
```

-#### Configuración
+#### Setup

-1. Inicia un servidor de base de datos PostgreSQL
+1. Start a PostgreSQL database server

```sh
initdb -D .postgres
pg_ctl -D .postgres -l logfile start
createdb graph-node
```

-2. Clona el repositorio [Graph Node](https://github.com/graphprotocol/graph-node) y crea la fuente ejecutando `cargo build`
+2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build`

-3. Ahora que todas las dependencias están configuradas, inicia el nodo Graph:
+3. Now that all the dependencies are setup, start the Graph Node:

```sh
cargo run -p graph-node --release -- \
```

### Introducción a Kubernetes

-Puedes encontrar un ejemplo completo de configuración de Kubernetes en el [Indexer Repository](https://github.com/graphprotocol/indexer/tree/main/k8s).
+A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s).

### Puertos

Cuando está funcionando, Graph Node muestra los siguientes puertos:

| Puerto | Objeto | Rutas | Argumento CLI | Variable de Entorno |
| --- | --- | --- | --- | --- |
| 8000 | Servidor HTTP GraphQL<br />(para consultas de subgrafos) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
| 8001 | GraphQL WS<br />(para suscripciones a subgrafos) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
| 8020 | JSON-RPC<br />(para administrar implementaciones) | / | --admin-port | - |
| 8030 | API de estado de indexación de subgrafos | /graphql | --index-node-port | - |
| 8040 | Métricas de Prometheus | /metrics | --metrics-port | - |

| Port | Purpose | Routes | CLI Argument | Environment Variable |
| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
| 8020 | JSON-RPC<br />(for managing deployments) | / | \--admin-port | - |
| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint.

## Configuración avanzada de Graph Node

En su forma más simple, Graph Node puede funcionar con una única instancia de Graph Node, una única base de datos PostgreSQL, un nodo IPFS y los clientes de red que requieran los subgrafos a indexar.

-Esta configuración puede escalarse horizontalmente, añadiendo múltiples Graph Nodes, y múltiples bases de datos para soportar esos Graph Nodes. Los usuarios avanzados pueden querer aprovechar algunas de las capacidades de escalado horizontal de Graph Node, así como algunas de las opciones de configuración más avanzadas, a través del archivo `config.toml` y las variables de entorno de Graph Node.
+This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables.

### `config.toml`

-Se puede utilizar un archivo de configuración [TOML](https://toml.io/en/) para establecer configuraciones más complejas que las expuestas en la CLI. La ubicación del archivo se pasa con el modificador de línea de comandos --config.
+A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch.

 > Cuando se utiliza un archivo de configuración, no es posible utilizar las opciones --postgres-url, --postgres-secondary-hosts y --postgres-host-weights.

-Se puede proveer un archivo `config.toml` mínimo; el siguiente archivo es equivalente al uso de la opción de línea de comandos --postgres-url:
+A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option:

```toml
[store]
[store.primary]
connection="<.. postgres-url argument ..>"
[deployment]
[deployment.rule]
indexers = [ "<.. list of all indexing nodes ..>" ]
```

-La documentación completa de `config.toml` se puede encontrar en los [documentos de Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md).
+Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md).

#### Graph Nodes múltiples

Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules).

#### Reglas de deploy

-Dados múltiples Graph Nodes, es necesario gestionar el deploy de nuevos subgrafos para que el mismo subgrafo no sea indexado por dos nodos diferentes, lo que llevaría a colisiones. Esto puede hacerse usando reglas de deploy, que también pueden especificar en qué `shard` deben almacenarse los datos de un subgrafo, si se está usando la fragmentación de la base de datos. Las reglas de deploy pueden coincidir con el nombre del subgrafo y la red que el deploy está indexando para tomar una decisión.
+Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. Ejemplo de configuración de reglas de deploy: @@ -150,7 +150,7 @@ indexers = [ ] ``` -Más información sobre las reglas de deploy [aquí](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). #### Nodos de consulta dedicados @@ -167,7 +167,7 @@ Cualquier nodo cuyo --node-id coincida con la expresión regular se configurará Para la mayoría de los casos de uso, una única base de datos Postgres es suficiente para soportar una instancia de graph-node. Cuando una instancia de graph-node supera una única base de datos Postgres, es posible dividir el almacenamiento de los datos de graph-node en varias bases de datos Postgres. Todas las bases de datos juntas forman el almacén de la instancia graph-node. Cada base de datos individual se denomina shard. -Los shards pueden usarse para dividir deploys de subgrafos en múltiples bases de datos, y también pueden usarse para usar réplicas para repartir la carga de consultas entre las bases de datos. Esto incluye la configuración del número de conexiones de base de datos disponibles que cada `graph-node` debe mantener en su pool de conexiones para cada base de datos, lo que se vuelve cada vez más importante a medida que se indexan más subgrafos. +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. El Sharding resulta útil cuando la base de datos existente no puede soportar la carga que le impone Graph Node y cuando ya no es posible aumentar el tamaño de la base de datos. @@ -175,11 +175,11 @@ El Sharding resulta útil cuando la base de datos existente no puede soportar la En términos de configuración de las conexiones, comienza con max_connections en postgresql.conf establecido en 400 (o tal vez incluso 200) y mira las métricas de Prometheus store_connection_wait_time_ms y store_connection_checkout_count. Tiempos de espera notables (cualquier cosa por encima de 5ms) es una indicación de que hay muy pocas conexiones disponibles; altos tiempos de espera allí también serán causados por la base de datos que está muy ocupada (como alta carga de CPU). Sin embargo, si la base de datos parece estable, los tiempos de espera elevados indican la necesidad de aumentar el número de conexiones. En la configuración, el número de conexiones que puede utilizar cada instancia de Graph Node es un límite superior, y Graph Node no mantendrá conexiones abiertas si no las necesita. -Más información sobre la configuración de almacenamiento [aquí](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). 
+Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### Bloques de procesamiento dedicado -Si hay varios nodos configurados, será necesario especificar un nodo que sea responsable de la ingesta de nuevos bloques, con el fin de evitar que todos los nodos de indexación configurados estén consultando la cabeza de la cadena. Esto se hace como parte del espacio de nombres de las `chains`, especificando el `node_id` que se utilizará para la ingestión de bloques: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### Soporte de múltiples redes -The Graph Protocol está aumentando el número de redes admitidas para recompensas de indexación, y existen muchos subgrafos que indexan redes no soportadas que un Indexador desearía procesar. El archivo `config.toml` permite una configuración expresiva y flexible de: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Redes múltiples - Múltiples proveedores por red (esto puede permitir dividir la carga entre los proveedores, y también puede permitir la configuración de nodos completos, así como nodos de archivo, con Graph Node prefiriendo proveedores más baratos si una carga de trabajo dada lo permite). - Detalles adicionales del proveedor, como features, autenticación y tipo de proveedor (para soporte experimental de Firehose) -La sección `[chains]` controla los proveedores de ethereum a los que se conecta graph-node, y dónde se almacenan los bloques y otros metadatos de cada cadena. El siguiente ejemplo configura dos cadenas, mainnet y kovan, donde los bloques para mainnet se almacenan en el shard vip y los bloques para kovan se almacenan en el shard primary. La cadena mainnet puede utilizar dos proveedores diferentes, mientras que kovan sólo tiene un proveedor. +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Más información sobre la configuración de proveedores [aquí](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Variables del entorno -Graph Node soporta una serie de variables de entorno que pueden activar features o cambiar el comportamiento de Graph Node. Estas variables están documentadas [aquí](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). 
+Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### Deploy continuo Los usuarios que están operando una configuración de indexación escalada con configuración avanzada pueden beneficiarse de la gestión de sus Graph Nodes con Kubernetes. -- El repositorio del Indexador tiene un [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) es un conjunto de herramientas para ejecutar un Indexador de Graph Protocol en Kubernetes mantenido por GraphOps. Proporciona un conjunto de gráficos Helm y una CLI para gestionar un deploy de Graph Node. +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### Operar Graph Node @@ -229,23 +229,23 @@ Dado un Graph Node en funcionamiento (¡o Graph Nodes!), el reto consiste en ges #### Logging -Los registros de Graph Node pueden proporcionar información útil para la depuración y optimización de Graph Node y subgrafos específicos. Graph Node soporta diferentes niveles de registro a través de la variable de entorno `GRAPH_LOG`, con los siguientes niveles: error, warn, info, debug o trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -Además, establecer `GRAPH_LOG_QUERY_TIMING` como `gql` proporciona más detalles sobre cómo se están ejecutando las consultas GraphQL (aunque esto generará un gran volumen de registros). +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### Supervisión & alerta +#### Monitoring & alerting Graph Node proporciona las métricas a través del endpoint Prometheus en el puerto 8040 por defecto. Grafana se puede utilizar para visualizar estas métricas. -El repositorio del Indexador proporciona un [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`Graphman` es una herramienta de mantenimiento para Graph Node, que ayuda en el diagnóstico y resolución de diferentes tareas cotidianas y excepcionales. +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -El comando graphman está incluido en los contenedores oficiales, y puedes docker exec en tu contenedor graph-node para ejecutarlo. Esto requiere un archivo `config.toml`. +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -La documentación completa de los comandos de `graphman` está disponible en el repositorio Graph Node. 
Ver \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) en el Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Trabajar con subgrafos @@ -253,7 +253,7 @@ La documentación completa de los comandos de `graphman` está disponible en el Disponible por defecto en el puerto 8030/graphql, la API de estado de indexación expone una serie de métodos para comprobar el estado de indexación de diferentes subgrafos, comprobar pruebas de indexación, inspeccionar características de subgrafos y mucho más. -El esquema completo está disponible [aquí](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Rendimiento de indexación @@ -267,8 +267,8 @@ El proceso de indexación consta de tres partes diferenciadas: Causas habituales de la lentitud de indexación: -- Tiempo empleado en encontrar eventos relevantes de la cadena (los call handlers en particular pueden ser lentos, dada la dependencia de `trace_filter`) -- Realización de un gran número de` eth_calls` como parte de los handlers +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers - Gran cantidad de interacción con el depósito durante la ejecución - Una gran cantidad de datos para guardar en el depósito - Un gran número de eventos que procesar @@ -287,19 +287,19 @@ Durante la indexación, los subgrafos pueden fallar si encuentran datos inespera En algunos casos, un fallo puede ser resuelto por el Indexador (por ejemplo, si el error es resultado de no tener el tipo correcto de proveedor, añadir el proveedor necesario permitirá continuar con la indexación). Sin embargo, en otros, se requiere un cambio en el código del subgrafo. -> Los fallos deterministas se consideran "finales", con una Prueba de Indexación generada para el bloque que falla, mientras que los fallos no deterministas no lo son, ya que el subgrafo puede conseguir "no fallar" y continuar indexando. En algunos casos, la etiqueta no determinista es incorrecta, y el subgrafo nunca superará el error; tales fallos deben ser reportados como incidencias en el repositorio del Graph Node. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Caché de bloques y llamadas -Graph Node almacena en caché ciertos datos en el depósito con el fin de ahorrar refetching desde el proveedor. Los bloques se almacenan en caché, así como los resultados de `eth_calls` (estos últimos se almacenan en caché a partir de un bloque específico). Este almacenamiento en caché puede aumentar drásticamente la velocidad de indexación durante la "resincronización" de un subgrafo ligeramente alterado. +Graph Node caches certain data in the store in order to save refetching from the provider. 
Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. -Sin embargo, en algunos casos, si un nodo de Ethereum ha proporcionado datos incorrectos durante algún período de tiempo, esos datos incorrectos pueden almacenarse en la memoria caché, lo que puede resultar en datos incorrectos o subgrafos fallidos. En este caso, los indexadores pueden utilizar `graphman` para limpiar la memoria caché afectada y luego retroceder los subgrafos afectados, lo que permitirá obtener datos actualizados del proveedor de datos (esperemos) saludable. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Si se sospecha de una inconsistencia en el caché de bloques, como un evento de falta de recepción tx: -1. `graphman chain list` para encontrar el nombre de la cadena. -2. `graphman chain check-blocks by-number ` comprobará si el bloque en caché coincide con el proveedor, y elimina el bloque de la caché si no es así. - 1. Si hay alguna diferencia, puede ser más seguro truncar todo el caché con `graphman chain truncate `>. +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. Si el bloque coincide con el proveedor, el problema puede depurarse directamente contra el proveedor. #### Consulta de problemas y errores @@ -312,7 +312,7 @@ No existe una "bala de plata", sino toda una serie de herramientas para prevenir ##### Caché de consultas -Graph Node almacena en caché las consultas GraphQL por defecto, lo que puede reducir significativamente la carga de la base de datos. Esto se puede configurar aún más con los ajustes `GRAPH_QUERY_CACHE_BLOCKS` y `GRAPH_QUERY_CACHE_MAX_MEM` - lee más [aquí](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### Análisis de consultas @@ -320,7 +320,7 @@ Las consultas problemáticas suelen surgir de dos maneras. En algunos casos, los En otros casos, el desencadenante puede ser un uso elevado de memoria en un nodo de consulta, en cuyo caso el reto consiste primero en identificar la consulta causante del problema. -Los Indexadores pueden utilizar [qlog](https://github.com/graphprotocol/qlog/) para procesar y resumir los registros de consultas de Graph Node. `GRAPH_LOG_QUERY_TIMING` también puede habilitarse para ayudar a identificar y depurar consultas lentas. +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. 
Ante una consulta lenta, los Indexadores tienen algunas opciones. Por supuesto, pueden alterar su modelo de costes, para aumentar significativamente el coste de envío de la consulta problemática. Esto puede dar lugar a una reducción de la frecuencia de esa consulta. Sin embargo, a menudo esto no resuelve la raíz del problema.

@@ -328,18 +328,18 @@ Las tablas de bases de datos que almacenan entidades suelen ser de dos tipos: Las de tipo "transacción", en las que las entidades, una vez creadas, no se actualizan nunca, es decir, almacenan algo parecido a una lista de transacciones financieras, y las de tipo "cuenta", en las que las entidades se actualizan muy a menudo, es decir, almacenan algo parecido a cuentas financieras que se modifican cada vez que se registra una transacción. Las tablas tipo cuenta se caracterizan por contener un gran número de versiones de entidades, pero relativamente pocas entidades distintas. A menudo, en este tipo de tablas, el número de entidades distintas es del 1% del número total de filas (versiones de entidades)

-En el caso de las tablas tipo cuenta, `graph-node` puede generar consultas que aprovechan los detalles de cómo Postgres acaba almacenando datos con una tasa de cambio tan alta, a saber, que todas las versiones de los bloques recientes se encuentran en una pequeña subsección del almacenamiento global de dicha tabla.
+For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table.

-El comando `graphman stats show <sgdNNN>` muestra, para cada tipo de entidad/tabla en un deploy, cuántas entidades distintas y cuántas versiones de entidades contiene cada tabla. Estos datos se basan en estimaciones internas de Postgres y, por lo tanto, son necesariamente imprecisos y pueden variar en un orden de magnitud. Un `-1` en la columna de `entidades` significa que Postgres cree que todas las filas contienen una entidad distinta.
+The command `graphman stats show <sgdNNN>` shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity.

-En general, las tablas en las que el número de entidades distintas es inferior al 1% del número total de filas/versiones de entidad son buenas candidatas para la optimización tipo cuenta. Cuando la salida de `graphman stats show` indica que una tabla podría beneficiarse de esta optimización, la ejecución de `graphman stats show <sgdNNN> <table>` realizará un recuento completo de la tabla, que puede ser lento, pero proporciona una medida precisa de la proporción de entidades distintas con respecto al total de versiones de entidades.
+In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show <sgdNNN> <table>` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions.

-Una vez que se ha determinado que una tabla es tipo cuenta, la ejecución de `graphman stats account-like <sgdNNN>.<table>` activará la optimización tipo cuenta para las consultas de esa tabla. La optimización se puede volver a desactivar con `graphman stats account-like --clear <sgdNNN>.<table>` Los nodos de consulta tardan hasta 5 minutos en darse cuenta de que se ha activado o desactivado la optimización. Después de activar la optimización, es necesario verificar que el cambio no hace que las consultas sean más lentas para esa tabla. Si has configurado Grafana para monitorizar Postgres, las consultas lentas aparecerán en `pg_stat_activity` en grandes números, tardando varios segundos. En ese caso, la optimización necesita ser desactivada de nuevo.
+Once a table has been determined to be account-like, running `graphman stats account-like <sgdNNN>.<table>` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again.

-En el caso de los subgrafos tipo Uniswap, las tablas de `pair` y `token` son las principales candidatas para esta optimización, y pueden tener un efecto drástico en la carga de la base de datos.
+For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load.

#### Eliminar subgrafos

> Se trata de una nueva funcionalidad, que estará disponible en Graph Node 0.29.x

-En algún momento un Indexador puede querer eliminar un subgrafo dado. Esto se puede hacer fácilmente mediante `graphman drop`, que elimina un deploy y todos sus datos indexados. El deploy puede especificarse como un nombre de subgrafo, un hash IPFS `Qm..`, o el espacio de nombre de la base de datos `sgdNNN`. Más documentación disponible [aquí](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop).
+At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop).

From 499c70114d4d319e14734795056a2cd7dffc376a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:24 -0500
Subject: [PATCH 0114/1534] New translations graph-node.mdx (Arabic)

---
 website/src/pages/ar/indexing/tooling/graph-node.mdx | 32 +++++++++----------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/website/src/pages/ar/indexing/tooling/graph-node.mdx b/website/src/pages/ar/indexing/tooling/graph-node.mdx
index ec9595ef8404..6a27301b680b 100644
--- a/website/src/pages/ar/indexing/tooling/graph-node.mdx
+++ b/website/src/pages/ar/indexing/tooling/graph-node.mdx
@@ -1,5 +1,5 @@
 ---
-title: Operating Graph Node
+title: Graph Node
 ---

 Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer.

@@ -32,9 +32,9 @@ Subgraph deployment metadata is stored on the IPFS network. The Graph Node prima

 To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server.

-### ابدأ من المصدر
+### Getting started from source

-#### متطلبات التثبيت
+#### Install prerequisites

 - **Rust**

 - **PostgreSQL**

 - **IPFS**

-- **متطلبات إضافية لمستخدمي Ubuntu **- لتشغيل عقدة الرسم البياني على Ubuntu ، قد تكون هناك حاجة إلى بعض الحزم الإضافية.
+- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed.

```sh
sudo apt-get install -y clang libpq-dev libssl-dev pkg-config
```

#### Setup

-1. شغل سيرفر قاعدة بيانات PostgreSQL
+1. Start a PostgreSQL database server
Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. استنسخ [ Graph Node ](https://github.com/graphprotocol/graph-node) وابني المصدر عن طريق تشغيل `cargo build` +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. ابدأ Graph Node: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...

    /subgraphs/name/.../... | --http-port | - |
| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...

    /subgraphs/name/.../... | --ws-port | - |
| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - |
| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - |
| 8040 | Prometheus metrics | /metrics | --metrics-port | - |
| Port | Purpose | Routes | CLI Argument | Environment Variable |
| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. @@ -245,7 +245,7 @@ The indexer repository provides an [example Grafana configuration](https://githu The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Working with subgraphs @@ -287,7 +287,7 @@ During indexing subgraphs might fail, if they encounter data that is unexpected, In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache From 62c20b4dda6621dd8cd34f202212aa51aaa900f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:25 -0500 Subject: [PATCH 0115/1534] New translations graph-node.mdx (Czech) --- .../pages/cs/indexing/tooling/graph-node.mdx | 122 +++++++++--------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/website/src/pages/cs/indexing/tooling/graph-node.mdx b/website/src/pages/cs/indexing/tooling/graph-node.mdx index b6067883a47a..56b3efbcbc59 100644 --- a/website/src/pages/cs/indexing/tooling/graph-node.mdx +++ b/website/src/pages/cs/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: Uzel provozního graf +title: Uzel Graf --- Graf Uzel je komponenta, která indexuje podgrafy a zpřístupňuje výsledná data k dotazování prostřednictvím rozhraní GraphQL API. Jako taková je ústředním prvkem zásobníku indexeru a její správná činnost je pro úspěšný provoz indexeru klíčová. -Tato část poskytuje kontextový přehled o uzlu Graf a o některých pokročilejších možnostech, které jsou indexátorům k dispozici. Podrobnou dokumentaci a pokyny najdete v úložišti [Graf Uzel](https://github.com/graphprotocol/graph-node). +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. 
Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Uzel Graf -[Graf Uzel](https://github.com/graphprotocol/graph-node) je referenční implementace pro indexování podgrafů v síti Graf, připojení ke klientům blockchainu, indexování podgrafů a zpřístupnění indexovaných dat k dotazování. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -Graf Uzel (a celý indexer stack) lze provozovat na holém železe nebo v cloudovém prostředí. Tato flexibilita centrální indexovací komponenty je klíčová pro robustnost protokolu v Graf Protocol. Stejně tak může být Graf Uzel [postaven ze zdrojového kódu](https://github.com/graphprotocol/graph-node), nebo mohou indexátory používat jeden z [poskytovaných obrazů Docker](https://hub.docker.com/r/graphprotocol/graph-node). +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### Databáze PostgreSQL @@ -20,9 +20,9 @@ Hlavní úložiště pro uzel Graf Uzel, kde jsou uložena data podgrafů, metad Aby mohl uzel Graph Node indexovat síť, potřebuje přístup k síťovému klientovi prostřednictvím rozhraní API JSON-RPC kompatibilního s EVM. Toto RPC se může připojit k jedinému klientovi nebo může jít o složitější nastavení, které vyrovnává zátěž mezi více klienty. -Zatímco některé podgrafy mohou vyžadovat pouze plný uzel, některé mohou mít indexovací funkce, které vyžadují další funkce RPC. Konkrétně podgrafy, které v rámci indexování provádějí `eth_calls`, budou vyžadovat archivní uzel, který podporuje [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), a podgrafy s `callHandlers` nebo `blockHandlers` s filtrem `call` vyžadují podporu `trace_filter` ([viz dokumentace modulu trace zde](https://openethereum.github.io/JSONRPC-trace-module)). +While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Network Firehoses** - Firehose je služba gRPC poskytující uspořádaný, ale vidlicově orientovaný proud bloků, vyvinutá hlavními vývojáři Grafu pro lepší podporu výkonného indexování v měřítku. V současné době to není požadavek na indexátor, ale doporučujeme indexátorům, aby se s touto technologií seznámili ještě před plnou podporou sítě. Více informací o Firehose [zde](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. 
Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS uzly @@ -32,9 +32,9 @@ Metadata nasazení podgrafů jsou uložena v síti IPFS. Uzel Graf přistupuje b Aby bylo možné monitorovat a podávat zprávy, může uzel Graf volitelně zaznamenávat metriky na metrický server Prometheus. -### Začínáme od zdroje +### Getting started from source -#### Instalace předpokladů +#### Install prerequisites - **Rust** @@ -42,15 +42,15 @@ Aby bylo možné monitorovat a podávat zprávy, může uzel Graf volitelně zaz - **IPFS** -- **Další požadavky pro uživatele Ubuntu** - Pro spuštění Uzel Graf v Ubuntu může být potřeba několik dalších balíčků. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config ``` -#### Nastavení +#### Setup -1. Spuštění databázového serveru PostgreSQL +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Klonujte repozitář [Uzel Graf](https://github.com/graphprotocol/graph-node) a sestavte zdrojový kód spuštěním příkazu `cargo build` +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. Nyní, když jsou všechny závislosti nastaveny, spusťte uzel Graf: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,35 +71,35 @@ cargo run -p graph-node --release -- \ ### Začínáme s Kubernetes -Kompletní příklad konfigurace Kubernetes naleznete v úložišti [indexer](https://github.com/graphprotocol/indexer/tree/main/k8s). +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Ports Když je Graf Uzel spuštěn, zpřístupňuje následující ports: -| Port | Účel | Trasy | CLI Argument | Proměnná prostředí | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (pro dotazy podgrafy) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (pro odběry podgrafů) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (pro správu nasazení) | / | --admin-port | - | -| 8030 | Stav indexování podgrafů API | /graphql | --index-node-port | - | -| 8040 | Metriky Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **Důležité**: Dávejte pozor na veřejné vystavování portů - **administrační porty** by měly být uzamčeny. To se týká i koncového bodu JSON-RPC uzlu Graf. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Pokročilá konfigurace uzlu Graf V nejjednodušším případě lze Graf Uzel provozovat s jednou instancí Graf Uzel, jednou databází PostgreSQL, uzlem IPFS a síťovými klienty podle potřeby indexovaných podgrafů. -Toto nastavení lze horizontálně škálovat přidáním více graf uzlů a více databází pro podporu těchto graf uzlů. Pokročilí uživatelé mohou chtít využít některé možnosti horizontálního škálování Graf Uzel a také některé pokročilejší možnosti konfigurace prostřednictvím souboru `config.toml` a proměnných prostředí Graph Node. +This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. ### `config.toml` -Konfigurační soubor [TOML](https://toml.io/en/) lze použít k nastavení složitějších konfigurací, než jaké jsou dostupné v CLI. Umístění souboru se předává pomocí přepínače příkazového řádku --config. +A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. > Při použití konfiguračního souboru není možné použít volby --postgres-url, --postgres-secondary-hosts a --postgres-host-weights. -Lze zadat minimální soubor `config.toml`; následující soubor je ekvivalentní použití volby příkazového řádku --postgres-url: +A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: ```toml [store] @@ -110,7 +110,7 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -Úplnou dokumentaci `config.toml` lze nalézt v [dokumentech Graf Uzel](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### Více uzlů graf @@ -120,7 +120,7 @@ Graph Node indexing can scale horizontally, running multiple instances of Graph #### Pravidla nasazení -Při více uzlech graf je nutné řídit nasazení nových podgrafů tak, aby stejný podgraf nebyl indexován dvěma různými uzly, což by vedlo ke kolizím. To lze provést pomocí pravidel nasazení, která mohou také určit, do kterého `shardu` mají být data podgrafu uložena, pokud se používá rozdělení databáze. Pravidla nasazení mohou odpovídat názvu podgrafu a síti, kterou nasazení indexuje, aby bylo možné učinit rozhodnutí. +Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. 
Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. Příklad konfigurace pravidla nasazení: @@ -150,7 +150,7 @@ indexers = [ ] ``` -Více informací o pravidlech nasazení [zde](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). #### Vyhrazené dotazovací uzly @@ -167,7 +167,7 @@ Každý uzel, jehož --node-id odpovídá regulárnímu výrazu, bude nastaven t Pro většinu případů použití postačuje k podpoře instance graf uzlu jedna databáze Postgres. Pokud instance graf uzlu přeroste rámec jedné databáze Postgres, je možné rozdělit ukládání dat grafového uzlu do více databází Postgres. Všechny databáze dohromady tvoří úložiště instance graf uzlu. Každá jednotlivá databáze se nazývá shard. -Střepy lze použít k rozdělení nasazení dílčích graf do více databází a lze je také použít k použití replik k rozložení zátěže dotazů mezi databázemi. To zahrnuje konfiguraci počtu dostupných databázových připojení, které by měl každý `graf-node` udržovat ve svém fondu připojení pro každou databázi, což je stále důležitější, když se indexuje více podgrafů. +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. Sharding se stává užitečným, když vaše stávající databáze nedokáže udržet krok se zátěží, kterou na ni Graf Uzel vyvíjí, a když už není možné zvětšit velikost databáze. @@ -175,11 +175,11 @@ Sharding se stává užitečným, když vaše stávající databáze nedokáže Pokud jde o konfiguraci připojení, začněte s max_connections v souboru postgresql.conf nastaveným na 400 (nebo možná dokonce 200) a podívejte se na metriky store_connection_wait_time_ms a store_connection_checkout_count Prometheus. Výrazné čekací doby (cokoli nad 5 ms) jsou známkou toho, že je k dispozici příliš málo připojení; vysoké čekací doby tam budou také způsobeny tím, že databáze je velmi vytížená (například vysoké zatížení procesoru). Pokud se však databáze jinak jeví jako stabilní, vysoké čekací doby naznačují potřebu zvýšit počet připojení. V konfiguraci je horní hranicí, kolik připojení může každá instance graf uzlu používat, a graf uzel nebude udržovat otevřená připojení, pokud je nepotřebuje. -Více informací o konfiguraci obchodu [zde](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### Vyhrazené zpracování bloků -Pokud je nakonfigurováno více uzlů, je nutné určit jeden uzel, který je zodpovědný za přijímání nových bloků, aby všechny nakonfigurované indexové uzly neprováděly dotazování hlavy řetězce. To se provádí v rámci jmenného prostoru `chains`, kde se zadává `id_uzlu`, který se má používat pro přijímání bloků: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. 
This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### Podpora více sítí -Graf protokol zvyšuje počet sítí podporovaných pro indexaci odměn a existuje mnoho podgrafů indexujících nepodporované sítě, které by indexátor rád zpracoval. Soubor `config.toml` umožňuje expresivní a flexibilní konfiguraci: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Více sítí - Více poskytovatelů na síť (to může umožnit rozdělení zátěže mezi poskytovatele a také konfiguraci plných uzlů i archivních uzlů, přičemž Graph Node může preferovat levnější poskytovatele, pokud to daná pracovní zátěž umožňuje). - Další údaje o poskytovateli, jako jsou funkce, ověřování a typ poskytovatele (pro experimentální podporu Firehose) -Sekce `[chains]` řídí, ke kterým poskytovatelům ethereum se graf uzel připojuje a kde jsou uloženy bloky a další metadata pro jednotlivé řetězce. Následující příklad konfiguruje dva řetězce, mainnet a kovan, přičemž bloky pro mainnet jsou uloženy ve shard vip a bloky pro kovan jsou uloženy v primárním shard. Řetězec mainnet může používat dva různé poskytovatele, zatímco kovan má pouze jednoho poskytovatele. +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Více informací o konfiguraci poskytovatele [zde](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Proměnná prostředí -Graf Uzel podporuje řadu proměnných prostředí, které mohou povolit funkce nebo změnit chování Graf Uzel. Jsou zdokumentovány [zde](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### Průběžné nasazování Uživatelé, kteří provozují škálované nastavení indexování s pokročilou konfigurací, mohou využít správu svých graf uzlů pomocí Kubernetes. -- V úložišti indexeru je [příklad odkazu Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) je sada nástrojů pro provozování Graf Protocol Indexeru v Kubernetes, kterou spravuje společnost GraphOps. Poskytuje sadu grafů Helm a CLI pro správu nasazení uzlu Graf. +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. 
It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### Správa uzlu graf @@ -229,23 +229,23 @@ Vzhledem k běžícímu uzlu Graf (nebo uzlům Graf Uzel!) je pak úkolem spravo #### Protokolování -Protokoly Graf Uzel mohou poskytnout užitečné informace pro ladění a optimalizaci Graf Uzel a konkrétních podgrafů. Graf Uzel podporuje různé úrovně protokolů prostřednictvím proměnné prostředí `GRAPH_LOG` s následujícími úrovněmi: error, warn, info, debug nebo trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -Kromě toho nastavení `GRAPH_LOG_QUERY_TIMING` na `gql` poskytuje více podrobností o tom, jak dotazy GraphQL probíhají (i když to bude generovat velký objem protokolů). +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### Monitorování & upozornění +#### Monitoring & alerting Graf Uzel poskytuje metriky prostřednictvím koncového bodu Prometheus na portu 8040 ve výchozím nastavení. K vizualizaci těchto metrik lze pak použít nástroj Grafana. -Úložiště indexeru poskytuje [příklad konfigurace Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman` je nástroj pro údržbu Graf Uzel, který pomáhá s diagnostikou a řešením různých každodenních i výjimečných úloh. +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -Příkaz graphman je součástí oficiálních kontejnerů a můžete jej spustit pomocí docker exec do kontejneru graph-node. Vyžaduje soubor `config.toml`. +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Úplná dokumentace příkazů `graphman` je k dispozici v úložišti Graf Uzel. Viz \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) v Graf Uzel `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Práce s podgrafy @@ -253,7 +253,7 @@ Příkaz graphman je součástí oficiálních kontejnerů a můžete jej spusti API pro stav indexování, které je ve výchozím nastavení dostupné na portu 8030/graphql, nabízí řadu metod pro kontrolu stavu indexování pro různé podgrafy, kontrolu důkazů indexování, kontrolu vlastností podgrafů a další. -Úplné schéma je k dispozici [zde](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Výkonnost indexování @@ -267,8 +267,8 @@ Tyto fáze jsou spojeny do potrubí (tj. 
mohou být prováděny paralelně), ale Běžné příčiny pomalého indexování: -- Čas potřebný k nalezení relevantních událostí z řetězce (zejména obsluhy volání mohou být pomalé vzhledem k závislosti na `trace_filter`) -- Vytváření velkého počtu `eth_calls` jako součást obslužných +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers - Velké množství interakcí s úložištěm během provádění - Velké množství dat k uložení do úložiště - Velký počet událostí ke zpracování @@ -287,19 +287,19 @@ Během indexování mohou dílčí graf selhat, pokud narazí na neočekávaná V některých případech může být chyba řešitelná indexátorem (například pokud je chyba důsledkem toho, že není k dispozici správný typ zprostředkovatele, přidání požadovaného zprostředkovatele umožní pokračovat v indexování). V jiných případech je však nutná změna v kódu podgrafu. -> Deterministická selhání jsou považována za "konečná" a pro selhávající blok je vygenerován důkaz indexace, zatímco nedeterministická selhání nikoli, protože podgraf může selhat a pokračovat v indexaci. V některých případech je nedeterministické označení nesprávné a podgraf chybu nikdy nepřekoná; taková selhání by měla být hlášena jako problémy v úložišti Uzel Graf. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Bloková a volací mezipaměť -Uzel Graf ukládá určitá data do mezipaměti v úložišti, aby se ušetřilo opětovné načítání od zprostředkovatele. Bloky jsou ukládány do mezipaměti, stejně jako výsledky `eth_calls` (ty jsou ukládány do mezipaměti jako konkrétní blok). Toto ukládání do mezipaměti může výrazně zvýšit rychlost indexování při "resynchronizaci" mírně pozměněného podgrafu. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. -Pokud však uzel Etherea po určitou dobu poskytoval nesprávná data, mohou se v některých případech dostat do mezipaměti, což může vést k nesprávným datům nebo neúspěšným podgrafům. V takovém případě mohou indexery použít `graphman` k vymazání otrávené cache a následnému přetočení postižených podgrafů, které pak načtou čerstvá data od (doufejme) zdravého poskytovatele. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Pokud existuje podezření na nekonzistenci blokové mezipaměti, například chybějící událost tx receipt: -1. `graphman chain list` pro zjištění názvu řetězce. -2. `graphman chain check-blocks by-number ` zkontroluje, zda blok uložený v mezipaměti odpovídá poskytovateli, a pokud ne, odstraní blok z mezipaměti. - 1. 
Pokud existuje rozdíl, může být bezpečnější zkrátit celou mezipaměť pomocí `graphman chain truncate `. +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. Pokud se blok shoduje s poskytovatelem, lze problém ladit přímo proti poskytovateli. #### Problémy a chyby při dotazování @@ -312,7 +312,7 @@ Neexistuje jedna "stříbrná kulka", ale celá řada nástrojů pro prevenci, d ##### Ukládání dotazů do mezipaměti -Graf Uzel ve výchozím nastavení ukládá dotazy GraphQL do mezipaměti, což může výrazně snížit zatížení databáze. To lze dále konfigurovat pomocí nastavení `GRAPH_QUERY_CACHE_BLOCKS` a `GRAPH_QUERY_CACHE_MAX_MEM` - více informací [zde](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### Analýza dotazů @@ -320,7 +320,7 @@ Problematické dotazy se nejčastěji objevují jedním ze dvou způsobů. V ně V jiných případech může být spouštěcím faktorem vysoké využití paměti v uzlu dotazu a v takovém případě je třeba nejprve identifikovat dotaz, který problém způsobuje. -Indexery mohou používat [qlog](https://github.com/graphprotocol/qlog/) ke zpracování a shrnutí protokolů dotazů uzlu Graf. Lze také povolit funkci `GRAPH_LOG_QUERY_TIMING`, která pomáhá identifikovat a ladit pomalé dotazy. +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. Při pomalém dotazu mají indexátory několik možností. Samozřejmě mohou změnit svůj nákladový model a výrazně zvýšit náklady na odeslání problematického dotazu. To může vést ke snížení četnosti tohoto dotazu. To však často neřeší hlavní příčinu problému. @@ -328,18 +328,18 @@ Při pomalém dotazu mají indexátory několik možností. Samozřejmě mohou z Zdá se, že databázové tabulky, které uchovávají entity, se obecně vyskytují ve dvou variantách: Jsou to tabulky "transakční", kde entity, jakmile jsou jednou vytvořeny, nejsou nikdy aktualizovány, tj. ukládají něco podobného seznamu finančních transakcí, a "účetní", kde jsou entity aktualizovány velmi často, tj. ukládají něco podobného finančním účtům, které se mění při každé zaznamenané transakci. Tabulky podobné účtům se vyznačují tím, že obsahují velké množství verzí entit, ale relativně málo odlišných entit. Často je v takových tabulkách počet odlišných entit 1 % z celkového počtu řádků (verzí entit) -Pro tabulky podobné účtům může `graph-node` generovat dotazy, které využívají podrobnosti o tom, jak Postgres ukládá data s tak vysokou rychlostí změn, totiž že všechny verze pro poslední bloky jsou v malé podsekci celkového úložiště takové tabulky. +For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. 
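As a concrete sketch of the `graphman stats` workflow described in the surrounding paragraphs; the deployment namespace `sgd123`, the `pair` table name, and the `config.toml` path are illustrative placeholders, not values from this patch:

```sh
# Inspect Postgres' estimates of distinct entities vs. entity versions
# for every table of deployment sgd123 (illustrative namespace)
graphman --config config.toml stats show sgd123

# Turn on the account-like optimization for one table...
graphman --config config.toml stats account-like sgd123.pair

# ...and turn it off again if queries against that table become slower
graphman --config config.toml stats account-like --clear sgd123.pair
```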
-Příkaz `graphman stats show zobrazí pro každý typ entity/tabulku v nasazení, kolik různých entit a kolik verzí entit každá tabulka obsahuje. Tyto údaje jsou založeny na interních odhadech společnosti Postgres, a proto jsou nutně nepřesné a mohou se lišit o řád. Hodnota `-1` ve sloupci `entity` znamená, že Postgres se domnívá, že všechny řádky obsahují odlišnou entity. +The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. -Obecně platí, že tabulky, kde počet odlišných entit tvoří méně než 1 % celkového počtu řádků/verzí entit, jsou vhodnými kandidáty pro optimalizaci podobnou účtu. Pokud výstup `graphman stats show` naznačuje, že by tabulka mohla mít z této optimalizace prospěch, spuštění `graphman stats show
    ` provede úplné spočítání tabulky - to může být pomalé, ale poskytuje přesné měření poměru odlišných entit k celkovému počtu verzí entit. +In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show
    ` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions. -Jakmile je tabulka určena jako tabulka typu account, spuštěním příkazu `graphman stats account-like .
    ` se zapne optimalizace typu account pro dotazy proti této tabulce. Optimalizaci lze opět vypnout příkazem `graphman stats account-like --clear .
    ` Trvá až 5 minut, než si uzly dotazů všimnou, že byla optimalizace zapnuta nebo vypnuta. Po zapnutí optimalizace je nutné ověřit, zda změna skutečně nezpůsobí zpomalení dotazů pro danou tabulku. Pokud jste nakonfigurovali Grafanu pro monitorování Postgresu, pomalé dotazy by se objevily v `pg_stat_activity` ve velkých číslech a trvaly by několik sekund. V takovém případě je třeba optimalizaci opět vypnout. +Once a table has been determined to be account-like, running `graphman stats account-like .
    ` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
    ` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -U podgrafů typu Uniswap jsou tabulky `pair` a `token` hlavními kandidáty na tuto optimalizaci a mohou mít výrazný vliv na zatížení databáze. +For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. #### Odstranění podgrafů > Jedná se o novou funkci, která bude k dispozici v uzlu Graf 0.29.x -V určitém okamžiku může indexátor chtít daný podgraf odstranit. To lze snadno provést pomocí `graphman drop`, který odstraní nasazení a všechna jeho indexovaná data. Nasazení lze zadat buď jako název podgrafu, nebo jako hash IPFS `Qm..`, nebo jako jmenný prostor databáze `sgdNNN`. Další dokumentace je k dispozici [zde](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 7a8e76db93775db4ae3d4f7b62241823abb7b320 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:26 -0500 Subject: [PATCH 0116/1534] New translations graph-node.mdx (German) --- .../pages/de/indexing/tooling/graph-node.mdx | 116 +++++++++--------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/website/src/pages/de/indexing/tooling/graph-node.mdx b/website/src/pages/de/indexing/tooling/graph-node.mdx index 0a2dda70086d..61827d534c2f 100644 --- a/website/src/pages/de/indexing/tooling/graph-node.mdx +++ b/website/src/pages/de/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: Betreiben eines Graph-Knotens +title: Graph Node --- Graph Node ist die Komponente, die Subgrafen indiziert und die resultierenden Daten zur Abfrage über eine GraphQL-API verfügbar macht. Als solches ist es für den Indexer-Stack von zentraler Bedeutung, und der korrekte Betrieb des Graph-Knotens ist entscheidend für den Betrieb eines erfolgreichen Indexers. -Dies bietet eine kontextbezogene Übersicht über Graph Node und einige der erweiterten Optionen, die Indexierern zur Verfügung stehen. Ausführliche Dokumentation und Anleitungen finden Sie im [Graph Node-Repository](https://github.com/graphprotocol/graph-node). +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). -## Der Graph-Knoten +## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) ist die Referenzimplementierung für die Indizierung von Subgrafen auf The Graph Network, die Verbindung zu Blockchain-Clients, die Indizierung von Subgrafen und die Bereitstellung indizierter Daten für Abfragen. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -Graph Node (und der gesamte Indexer-Stack) kann auf Bare-Metal oder in einer Cloud-Umgebung ausgeführt werden. Diese Flexibilität der zentralen Indizierungskomponente ist entscheidend für die Robustheit von The Graph Protocol. In ähnlicher Weise kann Graph Node [aus der Sourcecode erstellt werden](https://github.com/graphprotocol/graph-node), oder Indexer können eines der [bereitgestellte Docker-Images](https:// hub.docker.com/r/graphprotocol/graph-node) benutzen. +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL-Datenbank @@ -32,9 +32,9 @@ Subgraf-Bereitstellungsmetadaten werden im IPFS-Netzwerk gespeichert. Der Graph- Um Überwachung und Berichterstellung zu ermöglichen, kann Graph Node optional Metriken auf einem Prometheus-Metrikserver protokollieren. -### Einstieg in den Sourcecode +### Getting started from source -#### Installieren Sie die Voraussetzungen +#### Install prerequisites - **Rust** @@ -42,15 +42,15 @@ Um Überwachung und Berichterstellung zu ermöglichen, kann Graph Node optional - **IPFS** -- **Zusätzliche Anforderungen für Ubuntu-Benutzer** - Um einen Graph-Knoten auf Ubuntu auszuführen, sind möglicherweise einige zusätzliche Pakete erforderlich. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config ``` -#### Konfiguration +#### Setup -1. Starten Sie einen PostgreSQL-Datenbankserver +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Klonen Sie das [Graph-Knoten](https://github.com/graphprotocol/graph-node)-Repo und erstellen Sie den Sourcecode durch Ausführen von `cargo build` +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. Nachdem alle Abhängigkeiten eingerichtet sind, starten Sie den Graph-Knoten: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,35 +71,35 @@ cargo run -p graph-node --release -- \ ### Erste Schritte mit Kubernetes -Eine vollständige Kubernetes-Beispielkonfiguration finden Sie im [Indexer-Repository](https://github.com/graphprotocol/indexer/tree/main/k8s). +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Ports Wenn es ausgeführt wird, stellt Graph Node die folgenden Ports zur Verfügung: -| Port | Zweck | Routen | CLI-Argument | Umgebungsvariable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP-Server
    (für Subgraf-Abfragen) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (für Subgraf-Abonnements) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (zum Verwalten von Deployments) | / | --admin-port | - | -| 8030 | Subgraf-Indizierungsstatus-API | /graphql | --index-node-port | - | -| 8040 | Prometheus-Metriken | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **Wichtig**: Seien Sie vorsichtig, wenn Sie Ports öffentlich zugänglich machen - **Administrationsports** sollten gesperrt bleiben. Dies schließt den JSON-RPC-Endpunkt des Graph-Knotens ein. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Erweiterte Graph-Knoten-Konfiguration In seiner einfachsten Form kann Graph Node mit einer einzelnen Instanz von Graph Node, einer einzelnen PostgreSQL-Datenbank, einem IPFS-Knoten und den Netzwerk-Clients betrieben werden, die von den zu indizierenden Subgrafen benötigt werden. -Dieses Setup kann horizontal skaliert werden, indem mehrere Graph-Knoten und mehrere Datenbanken zur Unterstützung dieser Graph-Knoten hinzugefügt werden. Fortgeschrittene Benutzer möchten vielleicht einige der horizontalen Skalierungsfunktionen von Graph Node sowie einige der fortgeschritteneren Konfigurationsoptionen durch die Datei `config.toml` und die Umgebungsvariablen von Graph Node nutzen. +This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. ### `config.toml` -Eine [TOML](https://toml.io/en/)-Konfigurationsdatei kann verwendet werden, um komplexere Konfigurationen als die in der CLI bereitgestellten festzulegen. Der Speicherort der Datei wird mit dem Befehlszeilenschalter --config übergeben. +A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. > Bei Verwendung einer Konfigurationsdatei ist es nicht möglich, die Optionen --postgres-url, --postgres-secondary-hosts und --postgres-host-weights zu verwenden. -Eine minimale `config.toml`-Datei kann bereitgestellt werden; Die folgende Datei entspricht der Verwendung der Befehlszeilenoption --postgres-url: +A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: ```toml [store] @@ -110,7 +110,7 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -Die vollständige Dokumentation von `config.toml` finden Sie in den [Graph Node Dokumenten](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### Mehrere Graph-Knoten @@ -150,7 +150,7 @@ indexers = [ ] ``` -Weitere Informationen zu Bereitstellungsregeln finden Sie [hier](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). 
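Since a placement mistake in deployment rules only becomes visible once a subgraph is deployed, it can help to dry-run the rules first. A minimal sketch, assuming a graphman build that provides the `config check` and `config place` subcommands; the subgraph name and network below are illustrative:

```sh
# Validate config.toml after editing deployment rules
graphman --config config.toml config check

# Preview which shard and index node a subgraph/network combination
# would be assigned to under the current rules (illustrative values)
graphman --config config.toml config place some/subgraph-name mainnet
```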
#### Dedizierte Abfrageknoten @@ -175,11 +175,11 @@ Sharding wird nützlich, wenn Ihre vorhandene Datenbank nicht mit der Last Schri Was das Konfigurieren von Verbindungen betrifft, beginnen Sie mit max_connections in postgresql.conf, das auf 400 (oder vielleicht sogar 200) eingestellt ist, und sehen Sie sich die Prometheus-Metriken store_connection_wait_time_ms und store_connection_checkout_count an. Spürbare Wartezeiten (alles über 5 ms) sind ein Hinweis darauf, dass zu wenige Verbindungen verfügbar sind; hohe Wartezeiten werden auch dadurch verursacht, dass die Datenbank sehr ausgelastet ist (z. B. hohe CPU-Last). Wenn die Datenbank jedoch ansonsten stabil erscheint, weisen hohe Wartezeiten darauf hin, dass die Anzahl der Verbindungen erhöht werden muss. In der Konfiguration ist die Anzahl der Verbindungen, die jede Graph-Knoten-Instanz verwenden kann, eine Obergrenze, und der Graph-Knoten hält Verbindungen nicht offen, wenn er sie nicht benötigt. -Weitere Informationen zur Speicherkonfiguration finden Sie [hier](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +Lesen Sie mehr über Lesen Sie [hier] mehr über die Store-Konfiguration [hier] (https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### Dedizierte Blockaufnahme -Wenn mehrere Knoten konfiguriert sind, muss ein Knoten angegeben werden, der für die Aufnahme neuer Blöcke verantwortlich ist, damit nicht alle konfigurierten Indexknoten den Kettenkopf abfragen. Dies geschieht als Teil des `chains`-Namespace, der die `node_id` angibt, die für die Blockaufnahme verwendet werden soll: +Wenn mehrere Knoten konfiguriert sind, muss ein Knoten angegeben werden, der für die Aufnahme neuer Blöcke verantwortlich ist, damit nicht alle konfigurierten Indexer-Knoten den Kettenkopf abfragen. Dies geschieht als Teil des `chains`-Namensraumes, indem die `node_id` angegeben wird, die für die Aufnahme von Blöcken verwendet werden soll: ```toml [chains] @@ -194,7 +194,7 @@ Das Graph-Protokoll erhöht die Anzahl der Netzwerke, die für die Indizierung v - Mehrere Anbieter pro Netzwerk (dies kann eine Aufteilung der Last auf Anbieter ermöglichen und kann auch die Konfiguration von vollständigen Knoten sowie Archivknoten ermöglichen, wobei Graph Node günstigere Anbieter bevorzugt, wenn eine bestimmte Arbeitslast dies zulässt). - Zusätzliche Anbieterdetails, wie Funktionen, Authentifizierung und Anbietertyp (für experimentelle Firehose-Unterstützung) -The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. +Der Abschnitt `[chains]` steuert die Ethereum-Provider, mit denen sich graph-node verbindet, und wo Blöcke und andere Metadaten für jede Kette gespeichert werden. Das folgende Datenbeispiel konfiguriert zwei Ketten, Mainnet und Kovan, wobei die Blöcke für mainnet im vip-Shard und die Blöcke für kovan im primary-Shard gespeichert werden. Die Mainnet-Kette kann zwei verschiedene Anbieter verwenden, während Kovan nur einen Anbieter hat. 
```toml [chains] @@ -210,42 +210,42 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Weitere Informationen zur Anbieterkonfiguration finden Sie [hier](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Lesen Sie mehr über die Providerkonfiguration [hier] (https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Umgebungsvariablen -Graph Node unterstützt eine Reihe von Umgebungsvariablen, die Funktionen aktivieren oder das Verhalten von Graph Node ändern können. Diese sind [hier](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) dokumentiert. +Graph Node unterstützt eine Reihe von Umgebungsvariablen, die Funktionen aktivieren oder das Verhalten von Graph Node ändern können. Diese sind [hier] dokumentiert (https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### Kontinuierlicher Einsatz Benutzer, die ein skaliertes Indizierungs-Setup mit erweiterter Konfiguration betreiben, können von der Verwaltung ihrer Graph-Knoten mit Kubernetes profitieren. -- Das Indexer-Repository enthält eine [Beispielreferenz für Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. +- Das Indexer-Repository hat eine [Beispiel-Kubernetes-Referenz](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad] (https://docs.graphops.xyz/launchpad/intro) ist ein Toolkit für den Betrieb eines Graph Protocol Indexer auf Kubernetes, das von GraphOps gepflegt wird. Es bietet eine Reihe von Helm-Diagrammen und eine CLI zur Verwaltung eines Graph Node- Deployments. ### Managing Graph Node Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. -#### Logging +#### Protokollierung -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Die Protokolle von Graph Node können nützliche Informationen für die Debuggen und Optimierung von Graph Node und bestimmten Subgraphen liefern. Graph Node unterstützt verschiedene Log-Ebenen über die Umgebungsvariable `GRAPH_LOG`, mit den folgenden Ebenen: Fehler, Warnung, Info, Debug oder Trace. -Außerdem bietet das Festlegen von `GRAPH_LOG_QUERY_TIMING` auf `gql` weitere Details darüber, wie GraphQL-Abfragen ausgeführt werden (obwohl dies eine große Menge an Protokollen generieren wird). +Wenn Sie außerdem `GRAPH_LOG_QUERY_TIMING` auf `gql` setzen, erhalten Sie mehr Details darüber, wie GraphQL-Abfragen ausgeführt werden (allerdings wird dadurch eine große Menge an Protokollen erzeugt). -#### Überwachung & Warnungen +#### Überwachung und Alarmierung Graph Node stellt die Metriken standardmäßig durch den Prometheus-Endpunkt am Port 8040 bereit. Grafana kann dann zur Visualisierung dieser Metriken verwendet werden. -Das Indexer-Repository bietet eine [Beispielkonfiguration für Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). 
+Das Indexer Repository bietet eine [Beispiel-Grafana-Konfiguration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman` ist ein Wartungstool für Graph Node, das bei der Diagnose und Lösung verschiedener alltäglicher und außergewöhnlicher Aufgaben hilft. +`graphman` ist ein Wartungswerkzeug für Graph Node, das bei der Diagnose und Lösung verschiedener alltäglicher und außergewöhnlicher Aufgaben hilft. -The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. +Der Befehl graphman ist in den offiziellen Containern enthalten, und Sie können ihn mit docker exec in Ihrem graph-node-Container ausführen. Er erfordert eine Datei `config.toml`. -Eine vollständige Dokumentation der `graphman`-Befehle ist im Graph Node-Repository verfügbar. Siehe \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) im Graph Node `/docs` +Eine vollständige Dokumentation der `graphman`-Befehle ist im Graph Node Repository verfügbar. Siehe [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) im Graph Node `/docs` ### Working with subgraphs @@ -253,7 +253,7 @@ Eine vollständige Dokumentation der `graphman`-Befehle ist im Graph Node-Reposi Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. -The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +Das vollständige Schema ist [hier] verfügbar (https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Indexing performance @@ -267,8 +267,8 @@ These stages are pipelined (i.e. they can be executed in parallel), but they are Common causes of indexing slowness: -- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) -- Making large numbers of `eth_calls` as part of handlers +- Zeit, die benötigt wird, um relevante Ereignisse aus der Kette zu finden (insbesondere Call-Handler können langsam sein, da sie auf `trace_filter` angewiesen sind) +- Durchführen einer großen Anzahl von „eth_calls“ als Teil von Handlern - A large amount of store interaction during execution - A large amount of data to save to the store - A large number of events to process @@ -287,19 +287,19 @@ During indexing subgraphs might fail, if they encounter data that is unexpected, In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. 
+> Deterministische Fehler werden als „endgültig“ betrachtet, wobei für den fehlgeschlagenen Block ein Indizierungsnachweis generiert wird, während nicht-deterministische Fehler nicht als solche betrachtet werden, da es dem Subgraph gelingen kann, „auszufallen“ und die Indizierung fortzusetzen. In einigen Fällen ist das nicht-deterministische Label falsch und der Subgraph wird den Fehler nie überwinden; solche Fehler sollten als Probleme im Graph Node Repository gemeldet werden. #### Block and call cache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node speichert bestimmte Daten im Zwischenspeicher, um ein erneutes Abrufen vom Anbieter zu vermeiden. Blöcke werden zwischengespeichert, ebenso wie die Ergebnisse von `eth_calls` (letztere werden ab einem bestimmten Block zwischengespeichert). Diese Zwischenspeicherung kann die Indizierungsgeschwindigkeit bei der „Neusynchronisierung“ eines geringfügig veränderten Subgraphen drastisch erhöhen. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +Wenn jedoch ein Ethereum-Knoten über einen bestimmten Zeitraum falsche Daten geliefert hat, können diese in den Cache gelangen und zu falschen Daten oder fehlgeschlagenen Subgraphen führen. In diesem Fall können Indexer `graphman` verwenden, um den vergifteten Cache zu löschen und dann die betroffenen Subgraphen zurückzuspulen, die dann frische Daten von dem (hoffentlich) gesunden Anbieter abrufen. If a block cache inconsistency is suspected, such as a tx receipt missing event: -1. `graphman chain list` to find the chain name. -2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. - 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. +1. `graphman chain list`, um den Namen der Kette zu finden. +2. `graphman chain check-blocks by-number ` prüft, ob der zwischengespeicherte Block mit dem Anbieter übereinstimmt, und löscht den Block aus dem Cache, wenn dies nicht der Fall ist. + 1. Wenn es einen Unterschied gibt, kann es sicherer sein, den gesamten Cache mit `graphman chain truncate ` abzuschneiden. 2. If the block matches the provider, then the issue can be debugged directly against the provider. #### Querying issues and errors @@ -312,7 +312,7 @@ There is not one "silver bullet", but a range of tools for preventing, diagnosin ##### Query caching -Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +Graph Node zwischenspeichert GraphQL-Abfragen standardmäßig, was die Datenbanklast erheblich reduzieren kann. 
Dies kann mit den Einstellungen `GRAPH_QUERY_CACHE_BLOCKS` und `GRAPH_QUERY_CACHE_MAX_MEM` weiter konfiguriert werden - lesen Sie mehr [hier](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### Analysing queries @@ -320,7 +320,7 @@ Problematic queries most often surface in one of two ways. In some cases, users In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. -Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. +Indexer können [qlog](https://github.com/graphprotocol/qlog/) verwenden, um die Abfrageprotokolle von Graph Node zu verarbeiten und zusammenzufassen. `GRAPH_LOG_QUERY_TIMING` kann auch aktiviert werden, um langsame Abfragen zu identifizieren und zu debuggen. Given a slow query, indexers have a few options. Of course they can alter their cost model, to significantly increase the cost of sending the problematic query. This may result in a reduction in the frequency of that query. However this often doesn't resolve the root cause of the issue. @@ -328,18 +328,18 @@ Given a slow query, indexers have a few options. Of course they can alter their Database tables that store entities seem to generally come in two varieties: 'transaction-like', where entities, once created, are never updated, i.e., they store something akin to a list of financial transactions, and 'account-like' where entities are updated very often, i.e., they store something like financial accounts that get modified every time a transaction is recorded. Account-like tables are characterized by the fact that they contain a large number of entity versions, but relatively few distinct entities. Often, in such tables the number of distinct entities is 1% of the total number of rows (entity versions) -For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. +Für kontoähnliche Tabellen kann `graph-node` Abfragen generieren, die sich die Details zunutze machen, wie Postgres Daten mit einer so hohen Änderungsrate speichert, nämlich dass alle Versionen für die jüngsten Blöcke in einem kleinen Teil des Gesamtspeichers für eine solche Tabelle liegen. -The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. +Der Befehl `graphman stats show ` zeigt für jeden Entitätstyp/jede Tabelle in einem Einsatz an, wie viele unterschiedliche Entitäten und wie viele Entitätsversionen jede Tabelle enthält. Diese Daten beruhen auf Postgres-internen Schätzungen und sind daher notwendigerweise ungenau und können um eine Größenordnung abweichen. Ein `-1` in der Spalte `entities` bedeutet, dass Postgres davon ausgeht, dass alle Zeilen eine eindeutige Entität enthalten. 
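As a concrete sketch of the workflow just described - the container name, config path and `sgd42` namespace are placeholders, not values from this document:

```sh
# Run graphman inside the official container to inspect entity statistics
# for one deployment; interpret a -1 in the "entities" column as described
# above (Postgres believes every row is a distinct entity).
docker exec -it graph-node \
  graphman --config /etc/graph-node/config.toml stats show sgd42
```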
-In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show <table>` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions.
+Im Allgemeinen sind Tabellen, bei denen die Anzahl der unterschiedlichen Entitäten weniger als 1 % der Gesamtzahl der Zeilen/Entitätsversionen beträgt, gute Kandidaten für die kontoähnliche Optimierung. Wenn die Ausgabe von `graphman stats show` darauf hindeutet, dass eine Tabelle von dieser Optimierung profitieren könnte, führt `graphman stats show <table>` eine vollständige Zählung der Tabelle durch - das kann langsam sein, liefert aber ein genaues Maß für das Verhältnis von eindeutigen Entitäten zu den gesamten Entitätsversionen.

-Once a table has been determined to be account-like, running `graphman stats account-like <sgdNNN>.<table>` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again.
+Sobald eine Tabelle als „kontoähnlich“ eingestuft wurde, wird durch die Ausführung von `graphman stats account-like <sgdNNN>.<table>` die kontoähnliche Optimierung für Abfragen auf diese Tabelle aktiviert. Die Optimierung kann mit `graphman stats account-like --clear <sgdNNN>.<table>
    ` wieder ausgeschaltet werden. Es dauert bis zu 5 Minuten, bis die Abfrageknoten merken, dass die Optimierung ein- oder ausgeschaltet wurde. Nach dem Einschalten der Optimierung muss überprüft werden, ob die Abfragen für diese Tabelle durch die Änderung nicht tatsächlich langsamer werden. Wenn Sie Grafana für die Überwachung von Postgres konfiguriert haben, würden langsame Abfragen in `pg_stat_activity` in großer Zahl angezeigt werden und mehrere Sekunden dauern. In diesem Fall muss die Optimierung wieder abgeschaltet werden. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +Bei Uniswap-ähnlichen Subgraphen sind die `pair`- und `token`-Tabellen die Hauptkandidaten für diese Optimierung und können die Datenbankauslastung erheblich beeinflussen. #### Removing subgraphs > This is new functionality, which will be available in Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +Irgendwann möchte ein Indexer vielleicht einen bestimmten Subgraph entfernen. Das kann einfach mit `graphman drop` gemacht werden, das einen Einsatz und alle indizierten Daten löscht. Der Einsatz kann entweder als Subgraph-Name, als IPFS-Hash `Qm..` oder als Datenbank-Namensraum `sgdNNN` angegeben werden. Weitere Dokumentation ist [hier](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) verfügbar. From aeac9494ffb50c2789118cdfc4df46530af8f7ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:27 -0500 Subject: [PATCH 0117/1534] New translations graph-node.mdx (Italian) --- .../pages/it/indexing/tooling/graph-node.mdx | 122 +++++++++--------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/website/src/pages/it/indexing/tooling/graph-node.mdx b/website/src/pages/it/indexing/tooling/graph-node.mdx index b3b3ca24e204..1675d7814306 100644 --- a/website/src/pages/it/indexing/tooling/graph-node.mdx +++ b/website/src/pages/it/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: Come lavorare con Graph Node +title: Graph Node --- Graph Node è il componente che indica i subgraph e rende i dati risultanti disponibili per l'interrogazione tramite API GraphQL. È quindi centrale per lo stack degli indexer, ed inoltre il corretto funzionamento di Graph Node è cruciale per il buon funzionamento di un indexer di successo. -Questo fornisce una panoramica contestuale di Graph Node e alcune delle opzioni più avanzate disponibili per gli Indexer. La documentazione e le istruzioni dettagliate si trovano nel [repository di Graph Node](https://github.com/graphprotocol/graph-node). +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). 
## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) è l'implementazione di riferimento per l'indicizzazione dei subgraph su Graph Network, la connessione ai client blockchain, l'indicizzazione dei subgraph e la disponibilità dei dati indicizzati per le query. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -Graph Node (e l'intero stack di indicizzatori) può essere eseguito su metallo nudo o in un ambiente cloud. Questa flessibilità della componente centrale di indicizzazione è fondamentale per la robustezza del protocollo The Graph. Allo stesso modo, Graph Node può essere [costruito dai sorgenti](https://github.com/graphprotocol/graph-node), oppure gli indexer possono usare una delle [immagini Docker fornite](https://hub.docker.com/r/graphprotocol/graph-node). +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### Database PostgreSQL @@ -20,9 +20,9 @@ Graph Node (e l'intero stack di indicizzatori) può essere eseguito su metallo n Per indicizzare una rete, Graph Node deve accedere a un cliente di rete tramite un'API JSON-RPC compatibile con EVM. Questo RPC può connettersi a un singolo cliente o può essere una configurazione più complessa che bilancia il carico su più clienti. -Mentre alcuni subgraph possono richiedere solo un nodo completo, alcuni possono avere caratteristiche di indicizzazione che richiedono funzionalità RPC aggiuntive. In particolare, i subgraph che effettuano `eth_call` come parte dell'indicizzazione richiedono un nodo archivio che supporti [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) e i subgraph con `callHandlers` o `blockHandlers` con filtro `call` richiedono il supporto `trace_filter` ([vedere la documentazione del modulo trace qui](https://openethereum.github.io/JSONRPC-trace-module)). +While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Firehose di rete** - un Firehose è un servizio gRPC che fornisce un flusso ordinato, ma consapevole dei blocchi, sviluppato dagli sviluppatori di The Graph per supportare meglio l'indicizzazione performante su scala. Al momento non è un requisito per gli Indexer, ma questi ultimi sono incoraggiati a familiarizzare con la tecnologia, prima del supporto completo della rete. Per saperne di più sul Firehose [qui](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. 
This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### Nodi IPFS @@ -32,9 +32,9 @@ I metadati di distribuzione del subgraph sono memorizzati sulla rete IPFS. The G Per consentire il monitoraggio e la creazione di report, Graph Node può opzionalmente registrare le metriche su un server di metriche Prometheus. -### Iniziare dalla sorgente +### Getting started from source -#### Installare i prerequisiti +#### Install prerequisites - **Rust** @@ -42,7 +42,7 @@ Per consentire il monitoraggio e la creazione di report, Graph Node può opziona - **IPFS** -- **Requisiti aggiuntivi per gli utenti di Ubuntu** - Per eseguire un Graph Node su Ubuntu potrebbero essere necessari alcuni pacchetti aggiuntivi. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config @@ -50,7 +50,7 @@ sudo apt-get install -y clang libpq-dev libssl-dev pkg-config #### Setup -1. Avviare un server di database PostgreSQL +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clonare la repository di [Graph Node](https://github.com/graphprotocol/graph-node) e costruire il sorgente eseguendo `cargo build` +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. Ora che tutte le dipendenze sono state configurate, avviare il Graph Node: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,35 +71,35 @@ cargo run -p graph-node --release -- \ ### Come iniziare con Kubernetes -Un esempio completo di configurazione Kubernetes si trova nel [repository indexer](https://github.com/graphprotocol/indexer/tree/main/k8s). +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Porti Quando è in funzione, Graph Node espone le seguenti porte: -| Porta | Obiettivo | Routes | Argomento CLI | Variabile d'ambiente | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
<br />(per le query di subgraph) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
-| 8001 | GraphQL WS<br />(per le sottoscrizioni ai subgraph) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
-| 8020 | JSON-RPC<br />(per la gestione dei deployment) | / | --admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - |
-| 8040 | Metriche di Prometheus | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **Importante**: fare attenzione a esporre le porte pubblicamente - le porte di **amministrazione** devono essere tenute sotto chiave. Questo include l'endpoint JSON-RPC del Graph Node. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## Configurazione avanzata del Graph Node Nella sua forma più semplice, Graph Node può essere utilizzato con una singola istanza di Graph Node, un singolo database PostgreSQL, un nodo IPFS e i client di rete richiesti dai subgraph da indicizzare. -Questa configurazione può essere scalata orizzontalmente, aggiungendo più Graph Node e più database per supportare tali Graph Node. Gli utenti avanzati potrebbero voler sfruttare alcune delle capacità di scalatura orizzontale di Graph Node, nonché alcune delle opzioni di configurazione più avanzate, tramite il file `config.toml` e le variabili d'ambiente di Graph Node. +This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. ### `config.toml` -Un file di configurazione [TOML](https://toml.io/en/) può essere usato per impostare configurazioni più complesse di quelle esposte nella CLI. Il percorso del file viene passato con l'opzione --config della riga di comando. +A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. > Quando si usa un file di configurazione, non è possibile usare le opzioni --postgres-url, --postgres-secondary-hosts e --postgres-host-weights. -È possibile fornire un file `config.toml` minimo; il file seguente è equivalente all'uso dell'opzione della riga di comando --postgres-url: +A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: ```toml [store] @@ -110,7 +110,7 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -La documentazione completa di `config.toml` si trova nei documenti di [Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### Graph Node multipli @@ -120,7 +120,7 @@ Graph Node indexing can scale horizontally, running multiple instances of Graph #### Regole di distribuzione -Dati più Graph Node, è necessario gestire la distribuzione di nuovi subgraph in modo che lo stesso subgraph non venga indicizzato da due nodi diversi, il che porterebbe a collisioni. Questo può essere fatto usando le regole di distribuzione, che possono anche specificare in quale `shard` devono essere memorizzati i dati di un subgraph, se si usa lo sharding del database. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. 
+Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. Esempio di configurazione della regola di distribuzione: @@ -150,7 +150,7 @@ indicizzatori = [ ] ``` -Per saperne di più sulle regole di distribuzione [qui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). #### Nodi di query dedicati @@ -167,7 +167,7 @@ Ogni nodo il cui --node-id corrisponde all'espressione regolare sarà impostato Per la maggior parte dei casi d'uso, un singolo database Postgres è sufficiente per supportare un'istanza del graph-node. Quando un'istanza del graph-node supera un singolo database Postgres, è possibile suddividere l'archiviazione dei dati del graph-node su più database Postgres. Tutti i database insieme formano lo store dell'istanza del graph-node. Ogni singolo database è chiamato shard. -Gli shard possono essere utilizzati per suddividere le distribuzioni di subgraph su più database e per utilizzare le repliche per distribuire il carico delle query tra i database. Questo include la configurazione del numero di connessioni al database disponibili che ogni `graph-node` deve mantenere nel suo pool di connessioni per ogni database, cosa che diventa sempre più importante quando si indicizzano più subgraph. +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. Lo sharding diventa utile quando il database esistente non riesce a reggere il carico che Graph Node gli impone e quando non è più possibile aumentare le dimensioni del database. @@ -175,11 +175,11 @@ Lo sharding diventa utile quando il database esistente non riesce a reggere il c Per quanto riguarda la configurazione delle connessioni, iniziare con max_connections in postgresql.conf impostato a 400 (o forse anche a 200) e osservare le metriche di Prometheus store_connection_wait_time_ms e store_connection_checkout_count. Tempi di attesa notevoli (qualsiasi cosa superiore a 5 ms) indicano che le connessioni disponibili sono troppo poche; tempi di attesa elevati possono anche essere causati da un database molto occupato (come un elevato carico della CPU). Tuttavia, se il database sembra altrimenti stabile, tempi di attesa elevati indicano la necessità di aumentare il numero di connessioni. Nella configurazione, il numero di connessioni che ogni istanza del graph-node può utilizzare è un limite massimo e Graph Node non manterrà aperte le connessioni se non ne ha bisogno. -Per saperne di più sulla configurazione dell'archivio [qui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). 
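The pool pressure described above can be spot-checked directly against the metrics endpoint; a small sketch, assuming the default Prometheus port of 8040:

```sh
# Pull only the connection-pool metrics mentioned above from the
# Prometheus endpoint that Graph Node exposes.
curl -s http://localhost:8040/metrics \
  | grep -E 'store_connection_wait_time_ms|store_connection_checkout_count'
```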
+Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### Ingestione di blocchi dedicati -Se sono stati configurati più nodi, sarà necessario specificare un nodo responsabile dell'ingestione dei nuovi blocchi, in modo che tutti i nodi indice configurati non eseguano il polling della testa della chain. Questo viene fatto come parte dello spazio dei nomi `chains`, specificando il `node_id` da usare per l'ingestione dei blocchi: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### Supporto di più reti -Il Graph Protocol sta aumentando il numero di reti supportate per l'indicizzazione delle ricompense ed esistono molti subgraph che indicizzano reti non supportate che un indexer vorrebbe elaborare. Il file `config.toml` consente una configurazione espressiva e flessibile di: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Reti multiple - Fornitori multipli per rete (questo può consentire di suddividere il carico tra i fornitori e di configurare nodi completi e nodi di archivio, con Graph Node che preferisce i fornitori più economici se un determinato carico di lavoro lo consente). - Ulteriori dettagli sul provider, come le caratteristiche, l'autenticazione e il tipo di provider (per il supporto sperimentale di Firehose) -La sezione `[chains]` controlla i fornitori di ethereum a cui graph-node si connette e dove vengono memorizzati i blocchi e altri metadati per ogni chain. L'esempio seguente configura due chain, mainnet e kovan, dove i blocchi per mainnet sono memorizzati nello shard vip e quelli per kovan nello shard primario. La chain mainnet può utilizzare due diversi provider, mentre kovan ha un solo provider. +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [catene] @@ -210,42 +210,42 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Per saperne di più sulla configurazione dei provider [qui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Variabili d'ambiente -Graph Node supporta una serie di variabili d'ambiente che possono abilitare funzioni o modificare il comportamento di Graph Node. Queste sono documentate [qui](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. 
These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### Distribuzione continua Gli utenti che gestiscono una configurazione di indicizzazione scalare con una configurazione avanzata possono trarre vantaggio dalla gestione dei Graph Node con Kubernetes. -- Il repository dell'indexer ha un [esempio di riferimento Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) è un kit di strumenti per l'esecuzione di un Graph Protocol Indexer su Kubernetes, gestito da GraphOps. Fornisce una serie di grafici Helm e una CLI per gestire una distribuzione di Graph Node. +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### Gestione del Graph Node Dato un Graph Node (o più Graph Nodes!) in funzione, la sfida consiste nel gestire i subgraph distribuiti tra i nodi. Graph Node offre una serie di strumenti che aiutano a gestire i subgraph. -#### Registrazione +#### Logging -I registri di Graph Node possono fornire informazioni utili per il debug e l'ottimizzazione di Graph Node e di specifici subgraph. Graph Node supporta diversi livelli di log tramite la variabile d'ambiente `GRAPH_LOG`, con i seguenti livelli: error, warn, info, debug o trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -Inoltre, impostando `GRAPH_LOG_QUERY_TIMING` su `gql` si ottengono maggiori dettagli sull'esecuzione delle query GraphQL (anche se questo genera un grande volume di log). +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### Monitoraggio e allerta +#### Monitoring & alerting Graph Node fornisce le metriche tramite l'endpoint Prometheus sulla porta 8040. È possibile utilizzare Grafana per visualizzare queste metriche. -Il repository dell'indexer fornisce un [esempio di configurazione di Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman` è uno strumento di manutenzione per Graph Node, che aiuta nella diagnosi e nella risoluzione di diversi compiti quotidiani ed eccezionali. +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -Il comando graphman è incluso nei contenitori ufficiali e si può eseguire con docker exec nel contenitore graph-node. Richiede un file `config.toml`. +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -La documentazione completa dei comandi di `graphman` è disponibile nel repository di Graph Node. 
Vedere \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) nel Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Lavorare con i subgraph @@ -253,7 +253,7 @@ La documentazione completa dei comandi di `graphman` è disponibile nel reposito Disponibile sulla porta 8030/graphql per impostazione predefinita, l'API dello stato di indicizzazione espone una serie di metodi per verificare lo stato di indicizzazione di diversi subgraph, controllare le prove di indicizzazione, ispezionare le caratteristiche dei subgraph e altro ancora. -Lo schema completo è disponibile [qui](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Prestazioni di indicizzazione @@ -267,8 +267,8 @@ Questi stadi sono collegati tra loro (cioè possono essere eseguiti in parallelo Cause comuni di lentezza dell'indicizzazione: -- Tempo impiegato per trovare eventi rilevanti dalla chain (i gestori di chiamate in particolare possono essere lenti, dato che si affidano a `trace_filter`) -- Effettuare un gran numero di `eth_calls` come parte dei gestori +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers - Una grande quantità di interazioni con l'archivio durante l'esecuzione - Una grande quantità di dati da salvare nell'archivio - Un numero elevato di eventi da elaborare @@ -287,19 +287,19 @@ Durante l'indicizzazione, i subgraph possono fallire se incontrano dati inaspett In alcuni casi, un errore può essere risolto dall'indexer (ad esempio, se l'errore è dovuto alla mancanza del tipo di provider giusto, l'aggiunta del provider richiesto consentirà di continuare l'indicizzazione). In altri casi, invece, è necessario modificare il codice del subgraph. -> I fallimenti deterministici sono considerati "definitivi", con la generazione di una Prova di Indicizzazione per il blocco fallito, mentre i fallimenti non deterministici non lo sono, in quanto il subgraph può riuscire a "non fallire" e continuare l'indicizzazione. In alcuni casi, l'etichetta non deterministica non è corretta e il subgraph non supererà mai l'errore; tali fallimenti devono essere segnalati come problemi sul repository di Graph Node. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Cache dei blocchi e delle chiamate -Graph Node memorizza nella cache alcuni dati nell'archiivio, per risparmiare il refetching dal provider. I blocchi sono memorizzati nella cache, così come i risultati delle chiamate `eth_call` (queste ultime sono memorizzate nella cache a partire da un blocco specifico). Questa cache può aumentare notevolmente la velocità di indicizzazione durante la "risincronizzazione" di un subgraph leggermente modificato. 
+Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. -Tuttavia, in alcuni casi, se un nodo Ethereum ha fornito dati non corretti per un certo periodo, questi possono entrare nella cache, causando dati errati o subgraph falliti. In questo caso gli indexer possono usare `graphman` per cancellare la cache avvelenata e quindi riavvolgere i subgraph interessati, che recupereranno quindi dati freschi dal provider (auspicabilmente) sano. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Se si sospetta un'incongruenza nella cache a blocchi, come ad esempio un evento di ricezione tx mancante: -1. `elenco chain di graphman` per trovare il nome della chain. -2. `chain graphman check-blocks by-number ` controlla se il blocco in cache corrisponde al fornitore e, in caso contrario, lo cancella dalla cache. - 1. Se c'è una differenza, può essere più sicuro troncare l'intera cache con `graphman chain truncate `. +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. Se il blocco corrisponde al provider, è possibile eseguire il debug del problema direttamente sul provider. #### Problemi ed errori di query @@ -312,7 +312,7 @@ Non esiste una "pallottola d'argento", ma una serie di strumenti per prevenire, ##### Caching delle query -Graph Node memorizza nella cache le query GraphQL per impostazione predefinita, riducendo in modo significativo il carico del database. Questo può essere ulteriormente configurato con impostazioni `GRAPH_QUERY_CACHE_BLOCKS` e `GRAPH_QUERY_CACHE_MAX_MEM` - per saperne di più [qui](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### Analisi delle query @@ -320,7 +320,7 @@ Le query problematiche emergono spesso in due modi. In alcuni casi, sono gli ste In altri casi, il fattore scatenante potrebbe essere l'elevato utilizzo della memoria su un nodo di query, nel qual caso la sfida consiste nell'identificare la query che causa il problema. -Gli indexer possono usare [qlog](https://github.com/graphprotocol/qlog/) per elaborare e riassumere i log delle query di Graph Node. Si può anche attivare `GRAPH_LOG_QUERY_TIMING` per aiutare a identificare e debuggare le query lente. +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. 
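A sketch of how a query node might be started so that its logs can later be fed to qlog - the node id, config path and log destination are illustrative assumptions:

```sh
# Enable per-query GraphQL timing logs, then run a query node and capture
# its output; the resulting log file is what a tool like qlog consumes.
export GRAPH_LOG=info
export GRAPH_LOG_QUERY_TIMING=gql
cargo run -p graph-node --release -- \
  --config config.toml \
  --node-id query_node_0 \
  --ipfs 127.0.0.1:5001 > query-node.log 2>&1
```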
Con una query lenta, gli indexer hanno alcune opzioni. Naturalmente possono modificare il loro modello di costo, aumentando in modo significativo il costo di invio della query problematica. Questo può portare a una riduzione della frequenza della query. Tuttavia, questo spesso non risolve la causa principale del problema. @@ -328,18 +328,18 @@ Con una query lenta, gli indexer hanno alcune opzioni. Naturalmente possono modi Le tabelle di database che memorizzano le entità sembrano essere generalmente di due tipi: "tipo transazioni", in cui le entità, una volta create, non vengono mai aggiornate, cioè memorizzano qualcosa di simile a un elenco di transazioni finanziarie, e "tipo account", in cui le entità vengono aggiornate molto spesso, cioè memorizzano qualcosa di simile a conti finanziari che vengono modificati ogni volta che viene registrata una transazione. Le tabelle di tipo account sono caratterizzate dal fatto di contenere un gran numero di versioni di entità, ma relativamente poche entità distinte. Spesso, in queste tabelle il numero di entità distinte è pari all'1% del numero totale di righe (versioni di entità) -Per le tabelle di tipo account, `graph-node` può generare query che sfruttano i dettagli del modo in cui Postgres finisce per memorizzare i dati con un tasso di modifica così elevato, ovvero che tutte le versioni per i blocchi recenti si trovano in una piccola sottosezione dello spazio di archiviazione complessivo di una tabella. +For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. -Il comando `graphman stats show mostra, per ogni tipo di entità/tabella in una distribuzione, quante entità distinte e quante versioni di entità contiene ogni tabella. Questi dati si basano su stime interne a Postgres e sono quindi necessariamente imprecisi e possono essere sbagliati di un ordine di grandezza. Un `-1` nella colonna `entità` significa che Postgres ritiene che tutte le righe contengano un'entità distinta. +The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. -In generale, le tabelle in cui il numero di entità distinte è inferiore all'1% del numero totale di righe/versioni di entità sono buone candidate per l'ottimizzazione di tipo account. Quando l'output di `graphman stats show` indica che una tabella potrebbe beneficiare di questa ottimizzazione, l'esecuzione di `graphman stats show
<table>` eseguirà un conteggio completo della tabella - che può essere lento, ma fornisce una misura precisa del rapporto tra entità distinte e versioni complessive delle entità.
+In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show <table>` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions.

-Una volta che una tabella è stata determinata come tipo account, l'esecuzione di `graphman stats tipo account <sgdNNN>.<table>` attiverà l'ottimizzazione tipo account per le query contro quella tabella. L'ottimizzazione può essere nuovamente disattivata con `graphman stats tipo account --clear <sgdNNN>.<table>`. Ci vogliono fino a 5 minuti prima che i nodi delle query notino che l'ottimizzazione è stata attivata o disattivata. Dopo aver attivato l'ottimizzazione, è necessario verificare che la modifica non renda effettivamente più lente le query per quella tabella. Se si è configurato Grafana per monitorare Postgres, le query lente verrebbero visualizzate in `pg_stat_activity` in gran numero, impiegando diversi secondi. In questo caso, l'ottimizzazione deve essere nuovamente disattivata.
+Once a table has been determined to be account-like, running `graphman stats account-like <sgdNNN>.<table>` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>
    ` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -Per i subgraph simili a Uniswap, le tabelle `pair` e `token` sono le prime candidate per questa ottimizzazione e possono avere un effetto drammatico sul carico del database. +For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. #### Rimozione dei subgraph > Si tratta di una nuova funzionalità, che sarà disponibile in Graph Node 0.29.x -A un certo punto un indexer potrebbe voler rimuovere un determinato subgraph. Questo può essere fatto facilmente tramite `graphman drop`, che cancella una distribuzione e tutti i suoi dati indicizzati. La distribuzione può essere specificata come un nome di subgraph, un hash IPFS `Qm..`, o lo spazio dei nomi del database `sgdNNN`. È disponibile ulteriore documentazione [qui](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 2d8503feaf15acdae115c0c6792849b955e1a59c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:28 -0500 Subject: [PATCH 0118/1534] New translations graph-node.mdx (Japanese) --- .../pages/ja/indexing/tooling/graph-node.mdx | 122 +++++++++--------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/website/src/pages/ja/indexing/tooling/graph-node.mdx b/website/src/pages/ja/indexing/tooling/graph-node.mdx index cb9e4f14e8f3..935603e4dfd0 100644 --- a/website/src/pages/ja/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ja/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: オペレーティンググラフノード +title: グラフノード --- グラフノードはサブグラフのインデックスを作成し、得られたデータをGraphQL API経由でクエリできるようにするコンポーネントです。そのため、インデクサースタックの中心的存在であり、グラフノードの正しい動作はインデクサーを成功させるために非常に重要です。 -これは、グラフノードの文脈的な概要と、インデクサーに利用可能なより高度なオプションのいくつかを提供します。詳細なドキュメントと説明は [Graph Node リポジトリ](https://github.com/graphprotocol/graph-node)で見ることができます。 +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## グラフノード -[Graph Node](https://github.com/graphprotocol/graph-node)は、グラフネットワーク上のサブグラフにインデックスを付け、ブロックチェーンクライアントに接続し、サブグラフにインデックスを付け、インデックス付けしたデータをクエリで利用できるようにするためのリファレンス実装です。 +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. 
-グラフノード(およびインデクサスタック全体)は、ベアメタルでもクラウド環境でも実行可能です。中央のインデックス作成コンポーネントのこの柔軟性は、グラフプロトコルの堅牢性にとって非常に重要です。同様に、グラフノードは[ソースからビルドすることができ](https://github.com/graphprotocol/graph-node)、インデクサーは[提供されたDocker Image](https://hub.docker.com/r/graphprotocol/graph-node)の1つを使用することができます。また +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQLデータベース @@ -20,21 +20,21 @@ title: オペレーティンググラフノード ネットワークにインデックスを付けるために、グラフ ノードは EVM 互換の JSON-RPC API を介してネットワーク クライアントにアクセスする必要があります。この RPC は単一のクライアントに接続する場合もあれば、複数のクライアントに負荷を分散するより複雑なセットアップになる場合もあります。 -一部のサブグラフは完全なノードのみを必要とする場合がありますが、一部のサブグラフには追加の RPC 機能を必要とするインデックス機能が含まれる場合があります。具体的には、インデックス作成の一部として `eth_calls` を作成するサブグラフには、[EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) をサポートするアーカイブ ノードが必要になります。、および `callHandlers` を持つサブグラフ、または `call` フィルタを持つ `blockHandlers` には、`trace_filter` サポートが必要です ([トレース モジュールのドキュメントはこちら](https://openethereum.github.io/JSONRPC-trace-module))。 +While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**近日公開です。Network Firehoses** - Firehose は、順序付けられた、しかしフォークを意識したブロックのストリームを提供する gRPC サービスで、The Graph のコア開発者により、大規模で高性能なインデックス作成をより良くサポートするために開発されました。これは現在、インデクサーの要件ではありませんが、インデクサーは、ネットワークの完全サポートに先立って、この技術に慣れることが推奨されています。Firehose の詳細については、[こちら](https://firehose.streamingfast.io/)を参照してください。 +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFSノード -IPFS ノード(バージョン 未満) - サブグラフのデプロイメタデータは IPFS ネットワーク上に保存されます。 グラフノードは、サブグラフのデプロイ時に主に IPFS ノードにアクセスし、サブグラフマニフェストと全てのリンクファイルを取得します。 ネットワーク・インデクサーは独自の IPFS ノードをホストする必要はありません。 ネットワーク用の IPFS ノードは、https://ipfs.network.thegraph.com でホストされています。 +IPFS ノード(バージョン 未満) - サブグラフのデプロイメタデータは IPFS ネットワーク上に保存されます。 グラフノードは、サブグラフのデプロイ時に主に IPFS ノードにアクセスし、サブグラフマニフェストと全てのリンクファイルを取得します。 ネットワーク・インデクサーは独自の IPFS ノードをホストする必要はありません。 ネットワーク用の IPFS ノードは、https://ipfs.network.thegraph.com でホストされています。 ### Prometheus メトリクスサーバー 監視とレポート作成を可能にするため、Graph NodeはオプションでPrometheusのメトリクスサーバーにメトリクスを記録することができます。 -### ソースからのスタート +### Getting started from source -#### インストールの前提条件 +#### Install prerequisites - **Rust** @@ -42,7 +42,7 @@ IPFS ノード(バージョン 未満) - サブグラフのデプロイメ - **IPFS** -- **Ubuntu ユーザーのための追加要件** - グラフノードを Ubuntu 上で動作させるためには、いくつかの追加パッケージが必要になります。 +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. 
```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config @@ -50,7 +50,7 @@ sudo apt-get install -y clang libpq-dev libssl-dev pkg-config #### Setup -1. PostgreSQL データベースサーバを起動します。 +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. [Graph Node](https://github.com/graphprotocol/graph-node) リポジトリのクローンを作成し、`cargo build` を実行してソースをビルドします +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. 全ての依存関係の設定が完了したら、グラフノードを起動します: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,35 +71,35 @@ cargo run -p graph-node --release -- \ ### Kubernetesを始めるにあたって -Kubernetesの完全な設定例は、[indexerリポジトリ](https://github.com/graphprotocol/indexer/tree/main/k8s)で見ることができます。 +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Port グラフノードは起動時に以下のポートを公開します。 -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
| 8020 | JSON-RPC<br />(for managing deployments) | / | --admin-port | - |
| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - |
| 8040 | Prometheus metrics | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **重要**: ポートを公に公開する場合は注意してください。**管理ポート**はロックしておく必要があります。ノードの JSON-RPC エンドポイント +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## グラフノードの高度な設定 最も単純な場合、Graph Node は、Graph Node の単一のインスタンス、単一の PostgreSQL データベース、IPFS ノード、およびサブグラフのインデックス作成に必要なネットワーク クライアントで操作できます。 -このセットアップは、複数のグラフノードと、それらのグラフノードをサポートする複数のデータベースを追加することで、水平方向に拡張することができます。上級ユーザは、`config.toml`ファイルやグラフノードの環境変数を使って、グラフノードの水平スケーリング機能、およびより高度な設定オプションを利用することができます。 +This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. ### `config.toml` -[TOML](https://toml.io/en/) 設定ファイルを使用すると、CLI で公開される設定よりも複雑な設定を行うことができます。ファイルの場所は --config コマンドラインスイッチで渡します。 +A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. > 設定ファイルを使用する場合、-postgres-url、-postgres-secondary-hosts、および --postgres-host-weights オプションを使用することはできません。 -最小限の`config.toml`ファイルを提供することができます。以下のファイルは、-postgres-urlコマンドラインオプションを使用することと同等です。 +A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: ```toml [store] @@ -110,7 +110,7 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -`config.toml` の完全なドキュメントは、[Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md) で見ることができます。 +Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### 複数のグラフノード @@ -120,7 +120,7 @@ Graph Node indexing can scale horizontally, running multiple instances of Graph #### デプロイメントルール -複数のグラフ ノードがある場合、同じサブグラフが 2 つの異なるノードによってインデックス付けされないように、新しいサブグラフの展開を管理する必要があります。これにより衝突が発生します。これは、データベース シャーディングが使用されている場合、サブグラフのデータを保存する `shard` も指定できるデプロイメント ルールを使用して実行できます。デプロイメント ルールは、決定を下すために、サブグラフ名と、デプロイメントがインデックスを作成しているネットワークで照合できます。 +Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. デプロイメントルールの設定例: @@ -150,7 +150,7 @@ indexers = [ ] ``` -デプロイメントルールについて詳しくは[こちら](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment)をご覧ください。 +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). 
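As a sketch of how a deployment actually reaches those rules - the subgraph name is a placeholder and graph-cli is assumed to be installed:

```sh
# Create and deploy a subgraph against the JSON-RPC admin port (8020);
# the deployment rules above then decide which index node and shard take it.
npx graph create example/my-subgraph --node http://localhost:8020
npx graph deploy example/my-subgraph \
  --node http://localhost:8020 \
  --ipfs http://localhost:5001
```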
#### 専用クエリノード @@ -167,7 +167,7 @@ query = "" ほとんどの場合、1つのPostgresデータベースでグラフノードインスタンスをサポートするのに十分です。グラフノードインスタンスが1つのPostgresデータベースを使い切った場合、グラフノードデータを複数のPostgresデータベースに分割して保存することが可能です。全てのデータベースが一緒になってグラフノードインスタンスのストアを形成します。個々のデータベースはシャード(shard)と呼ばれます。 -シャードは、サブグラフを複数のデータベースに分割するために使用することができ、また、データベース間でクエリの負荷を分散するためにレプリカを使用することができます。これには、各 `graph-node` が各データベースの接続プールに保持すべき、利用可能なデータベース接続の数の設定が含まれ、これはより多くのサブグラフがインデックス化されるにつれて、ますます重要になります。 +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. グラフノードの負荷に既存のデータベースが追いつかず、これ以上データベースサイズを大きくすることができない場合に、シャーディングが有効になります。 @@ -175,11 +175,11 @@ query = "" 接続の設定に関しては、まずpostgresql.confのmax_connectionsを400(あるいは200)に設定し、store_connection_wait_time_msとstore_connection_checkout_count Prometheusメトリクスを見てみてください。顕著な待ち時間(5ms以上)は、利用可能な接続が少なすぎることを示しています。高い待ち時間は、データベースが非常に忙しいこと(CPU負荷が高いなど)によっても引き起こされます。しかし、データベースが安定しているようであれば、待ち時間が長いのは接続数を増やす必要があることを示しています。設定上、各グラフノードインスタンスが使用できるコネクション数は上限であり、グラフノードは必要ないコネクションはオープンにしておきません。 -ストアコンフィギュレーションについて詳しくは[こちら](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases)をご覧ください。 +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### 専用ブロックインジェスト -複数のノードが設定されている場合、設定されているすべてのインデックスノードがチェーンヘッドをポーリングしないように、新しいブロックの取り込みに責任を持つノードを1つ指定する必要があります。これは `chains` 名前空間の一部として行われ、ブロックの取り込みに使われる `node_id` を指定します。 +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### 複数のネットワークに対応 -Graph Protocolは、インデックス作成報酬のためにサポートされるネットワークの数を増やしており、インデックス作成者が処理したいと思うサポートされていないネットワークをインデックスしたサブグラフが多く存在します。`config.toml` ファイルは、表現力豊かで柔軟な設定を可能にします。 +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - 複数のネットワーク - ネットワークごとに複数のプロバイダ(プロバイダ間で負荷を分割することができ、また、フルノードとアーカイブノードを構成することができ、作業負荷が許す限り、Graph Nodeはより安価なプロバイダを優先することができます)。 - 機能、認証、プロバイダの種類など、プロバイダの詳細(実験的なFirehoseのサポートのため) -`[chains]`セクションは、graph-nodeが接続するイーサリアムプロバイダーと、各チェーンのブロックや他のメタデータが格納される場所を制御します。次の例では、mainnet と kovan の 2 つのチェーンを設定し、mainnet のブロックは vip シャードに、kovan のブロックは primary シャードに格納されます。mainnet チェーンでは 2 つの異なるプロバイダを使用できますが、kovan チェーンではプロバイダは 1 つだけです。 +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. 
```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -プロバイダー設定の詳細については[こちら](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers)をご覧ください。 +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### 環境変数について -グラフ ノードは、機能を有効にしたり、グラフ ノードの動作を変更したりできるさまざまな環境変数をサポートしています。これらは[こちら](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md)に記載されています。 +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### 継続的なデプロイ 高度な構成でスケーリングされたインデックス作成セットアップを運用しているユーザーは、Kubernetes を使用してグラフ ノードを管理することでメリットが得られる場合があります。 -- インデクサーリポジトリには、[Kubernetesリファレンス例](https://github.com/graphprotocol/indexer/tree/main/k8s)があります。 -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) は、GraphOps が管理する Kubernetes 上で Graph Protocol Indexer を動作させるためのツールキットです。グラフノードのデプロイを管理するためのHelmチャートのセットとCLIを提供します。 +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### グラフノードの管理 @@ -229,23 +229,23 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] #### ロギング -グラフノードのログは、グラフノードと特定のサブグラフのデバッグと最適化に役立つ情報を提供します。グラフノードeは、`GRAPH_LOG`環境変数によって、以下のレベルで、異なるログレベルをサポートします:エラー、警告、情報、デバッグ、トレース。 +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -さらに、`GRAPH_LOG_QUERY_TIMING`を`gql`に設定すると、GraphQLクエリの実行方法についてより詳細に知ることができます(ただし、これは大量のログを生成することになります)。 +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### モニタリングとアラート& +#### Monitoring & alerting グラフノードは、デフォルトで8040ポートのPrometheusエンドポイント経由でメトリクスを提供します。そして、Grafanaを使用してこれらのメトリクスを可視化することができます。 -インデクサーリポジトリでは、[Grafanaの設定例](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml)が提供されています。 +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman`はグラフノードのメンテナンスツールで、日常的なタスクや例外的なタスクの診断と解決を支援します。 +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -graphmanコマンドは公式コンテナに含まれており、グラフノードコンテナにdocker execすることで実行できます。その際、`config.toml` ファイルが必要になります。 +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -`graphman` コマンドの完全なドキュメントは グラフノードリポジトリにあります。グラフノードの `/docs` にある \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) を参照してください。 +Full documentation of `graphman` commands is available in the Graph Node repository. 
See [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs`

### サブグラフの操作

@@ -253,7 +253,7 @@ graphmanコマンドは公式コンテナに含まれており、グラフノー

デフォルトではポート8030/graphqlで利用可能なindexing status APIは、異なるサブグラフのindexing statusのチェック、indexing proofのチェック、サブグラフの特徴の検査など、様々なメソッドを公開しています。

-完全なスキーマは[こちら](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql)から入手可能です。
+The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql).

#### インデックスのパフォーマンス

@@ -267,8 +267,8 @@ graphmanコマンドは公式コンテナに含まれており、グラフノー

インデックス作成が遅くなる一般的な原因:

-- チェーンから関連するイベントを見つけるのにかかる時間 (特にコール ハンドラーは `trace_filter` に依存しているため遅くなることがあります)
-- ハンドラの一部として大量の`eth_calls`を作成する
+- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`)
+- Making large numbers of `eth_calls` as part of handlers
- 実行中に大量のストアインタラクションが発生
- ストアに保存するデータ量が多い場合
- 処理するイベントの数が多い場合
@@ -287,19 +287,19 @@ graphmanコマンドは公式コンテナに含まれており、グラフノー

いくつかのケースでは、失敗はインデクサーによって解決できるかもしれません(例えば、エラーが正しい種類のプロバイダを持っていない結果である場合、必要なプロバイダを追加することでインデックス作成を継続することが可能になります)。しかし、サブグラフのコードを変更する必要がある場合もあります。

-> 決定論的な失敗は「最終」と見なされ、失敗したブロックに対して生成されたインデックス作成の証明が含まれますが、非決定論的な失敗はそうではありません。場合によっては、非決定論的ラベルが正しくなく、サブグラフがエラーを克服することはありません。このような障害は、グラフ ノード リポジトリの問題として報告する必要があります。
+> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository.

#### ブロックキャッシュとコールキャッシュ

-グラフノードは、プロバイダからのリフェッチを節約するために、ストア内の特定のデータをキャッシュします。ブロックは、`eth_calls`の結果と同様にキャッシュされます(後者は特定のブロックのものとしてキャッシュされます)。このキャッシュは、わずかに変更されたサブグラフの "再同期" 時にインデックス作成速度を劇的に向上させることができます。
+Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph.

-ただし、場合によっては、イーサリアム ノードが一定期間誤ったデータを提供した場合、それがキャッシュに入り、誤ったデータやサブグラフの失敗につながる可能性があります。この場合、インデクサーは `graphman` を使用して汚染されたキャッシュをクリアし、影響を受けたサブグラフを巻き戻して、(できれば) 正常なプロバイダーから新しいデータをフェッチします。
+However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider.

TX受信欠落イベントなど、ブロックキャッシュの不整合が疑われる場合。

-1. `graphman chain list`でチェーン名を検索します。
-2. `graphman chain check-blocks by-number ` は、キャッシュされたブロックがプロバイダにマッチするかをチェックし、マッチしない場合はキャッシュからブロックを削除します。
-   1. もし違いがあれば、`graphman chain truncate ` でキャッシュ全体を切り捨てた方が安全かもしれません。
+1. `graphman chain list` to find the chain name.
+2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t.
+   1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `.
2. 
ブロックがプロバイダに一致する場合、問題はプロバイダに対して直接デバッグすることができます。 #### 問題やエラーのクエリ @@ -312,7 +312,7 @@ TX受信欠落イベントなど、ブロックキャッシュの不整合が疑 ##### クエリキャッシング -グラフノードはデフォルトで GraphQL クエリをキャッシュし、データベースの負荷を大幅に軽減することができます。これは、`GRAPH_QUERY_CACHE_BLOCKS` と `GRAPH_QUERY_CACHE_MAX_MEM` 設定でさらに設定することができます - 詳しくは [ こちら](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching) を参照してください。 +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### クエリの分析 @@ -320,7 +320,7 @@ TX受信欠落イベントなど、ブロックキャッシュの不整合が疑 また、クエリノードでメモリ使用量が多いことが引き金になる場合もあり、その場合は、まず問題の原因となっているクエリを特定することが課題となります。 -インデクサは [qlog](https://github.com/graphprotocol/qlog/) を使ってグラフノードのクエリログを処理したりまとめたりすることができます。`GRAPH_LOG_QUERY_TIMING` は、遅いクエリを特定しデバッグするために有効です。 +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. クエリが遅い場合、インデクサーにはいくつかのオプションがあります。もちろん、コスト モデルを変更して、問題のあるクエリを送信するコストを大幅に増やすことができます。これにより、そのクエリの頻度が減少する可能性があります。ただし、これでは問題の根本原因が解決しないことがよくあります。 @@ -328,18 +328,18 @@ TX受信欠落イベントなど、ブロックキャッシュの不整合が疑 エンティティを格納するデータベーステーブルには、一般に2つの種類があるようです。エンティティは一度作成されると更新されない「トランザクションライク」なもの、 つまり金融取引のリストのようなものを格納するものと、エンティティが頻繁に更新される「アカウント ライク」なもの、つまり取引が記録されるたびに変更される金融口座のようなものを格納するものである。口座のようなテーブルの特徴は、多くのバージョンの実体を含むが、異なる実体は 比較的少ないということである。このようなテーブルでは、別個のエンティティの数は行(エンティティバージョン)の総数の1%であることがよくある。 -アカウントライクテーブルでは、`graph-node`は、Postgresがどのように高い変更率でデータを保存することになるかの詳細、つまり、最近のブロックのすべてのバージョンがそのテーブルのストレージ全体の小さなサブセクションにあるということを利用したクエリを生成することができます。 +For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. -コマンド `graphman stats show 展開内の各エンティティ タイプ/テーブルについて、個別のエンティティの数と、各テーブルに含まれるエンティティ バージョンの数を示します。このデータは Postgres 内部の見積もりに基づいているため、必然的に不正確であり、桁違いにずれている可能性があります。 `entities` 列の `-1` は、すべての行に別個のエンティティが含まれていると Postgres が認識していることを意味します。 +The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. -一般に、別個の実体の数が行/実体のバージョンの総数の1%未満であるテーブルは、アカウントのような最適化の良い候補になります。`graphman stats show`の出力が、テーブルがこの最適化によって恩恵を受けるかもしれないことを示す場合、`graphman stats show
    ` はテーブルをフルカウントし、遅いかもしれませんが、全体のエンティティバージョンに対するdistinct entitiesの比率を正確に測定することができます。 +In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show
    ` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions. -一旦テーブルがアカウントライクであると決定されると、`graphman stats account-like .
    ` を実行すると、そのテーブルに対するクエリのためにアカウントライクの最適化がオンになります。最適化は、`graphman stats account-like --clear .
    ` で再びオフにすることができます。最適化がオンまたはオフになったことにクエリノードが気付くまで、最大で5分間かかります。最適化をオンにした後、その変更によって実際にそのテーブルのクエリが遅くならないことを確認する必要があります。Postgres を監視するように Grafana を構成している場合、遅いクエリは `pg_stat_activity` に大量に表示され、数秒かかることになるはずです。その場合、最適化を再度オフにする必要があります。 +Once a table has been determined to be account-like, running `graphman stats account-like .
    ` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
    ` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity` in large numbers, taking several seconds. In that case, the optimization needs to be turned off again.

-Uniswap のようなサブグラフの場合、`pair` および `token` テーブルがこの最適化の最有力候補であり、データベースの負荷に劇的な影響を与える可能性があります。
+For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load.

#### サブグラフの削除

> これは新しい機能で、Graph Node 0.29.xで利用可能になる予定です。

-ある時点で、インデクサーは与えられたサブグラフを削除したいと思うかもしれません。これは、`graphman drop` によって簡単に行えます。これは、配置とそのインデックスされたデータを全て削除します。配置はサブグラフ名、IPFS ハッシュ `Qm..` またはデータベース名前空間 `sgdNNN` として指定することができます。詳しいドキュメントは、[こちら](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop)にあります。
+At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all its indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop).

From a231551db5c43aa4a8088fe3ae414e96c6bb9d6c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:29 -0500
Subject: [PATCH 0119/1534] New translations graph-node.mdx (Korean)

---
 .../pages/ko/indexing/tooling/graph-node.mdx  | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/website/src/pages/ko/indexing/tooling/graph-node.mdx b/website/src/pages/ko/indexing/tooling/graph-node.mdx
index dbbfcd5fc545..6a27301b680b 100644
--- a/website/src/pages/ko/indexing/tooling/graph-node.mdx
+++ b/website/src/pages/ko/indexing/tooling/graph-node.mdx
@@ -1,5 +1,5 @@
---
-title: Operating Graph Node
+title: Graph Node
---

Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer.

@@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit

When it is running Graph Node exposes the following ports:

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. @@ -245,7 +245,7 @@ The indexer repository provides an [example Grafana configuration](https://githu The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Working with subgraphs @@ -287,7 +287,7 @@ During indexing subgraphs might fail, if they encounter data that is unexpected, In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache From a18692e377b0c182df50c2280383ce8732b217cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:31 -0500 Subject: [PATCH 0120/1534] New translations graph-node.mdx (Dutch) --- .../pages/nl/indexing/tooling/graph-node.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/nl/indexing/tooling/graph-node.mdx b/website/src/pages/nl/indexing/tooling/graph-node.mdx index d2117bcf07d5..6a27301b680b 100644 --- a/website/src/pages/nl/indexing/tooling/graph-node.mdx +++ b/website/src/pages/nl/indexing/tooling/graph-node.mdx @@ -1,5 +1,5 @@ --- -title: Operating Graph Node +title: Graph Node --- Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -32,9 +32,9 @@ Subgraph deployment metadata is stored on the IPFS network. The Graph Node prima To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. 
-### Starten vanuit source +### Getting started from source -#### Installeer vereisten +#### Install prerequisites - **Rust** @@ -42,7 +42,7 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Aanvullende vereisten voor Ubuntu gebruikers** - Om een Graph Node op Ubuntu te laten werken, zijn mogelijke enkele aanvullende updates nodig. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config @@ -50,7 +50,7 @@ sudo apt-get install -y clang libpq-dev libssl-dev pkg-config #### Setup -1. Start een PostgreSQL database server +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Kloon de [Graph Node](https://github.com/graphprotocol/graph-node) map en bouw de bron door `cargo build` uit te voeren +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. Nu alle afhankelijkheden zijn ingesteld, start de Graph Node: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Poort | Doel | Routes | CLI-Argument | Omgevingsvariabele | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (voor subgraph query's) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (voor subgraph abonnementen) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (voor het beheren van implementaties) | / | --admin-port | - | -| 8030 | Subgraph indexeerstatus API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - |
| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint.

@@ -245,7 +245,7 @@ The indexer repository provides an [example Grafana configuration](https://githu

The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file.

-Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs`
+Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs`

### Working with subgraphs

@@ -287,7 +287,7 @@ During indexing subgraphs might fail, if they encounter data that is unexpected,

In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required.

-> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository.
+> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository.

#### Block and call cache

From 262d6de61b11649b552598e1779f03a8b8d37900 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:32 -0500
Subject: [PATCH 0121/1534] New translations graph-node.mdx (Polish)

---
 .../pages/pl/indexing/tooling/graph-node.mdx  | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/website/src/pages/pl/indexing/tooling/graph-node.mdx b/website/src/pages/pl/indexing/tooling/graph-node.mdx
index dbbfcd5fc545..6a27301b680b 100644
--- a/website/src/pages/pl/indexing/tooling/graph-node.mdx
+++ b/website/src/pages/pl/indexing/tooling/graph-node.mdx
@@ -1,5 +1,5 @@
---
-title: Operating Graph Node
+title: Graph Node
---

Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer.

@@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit

When it is running Graph Node exposes the following ports:

-| Port | Purpose | Routes | CLI Argument | Environment Variable |
-| --- | --- | --- | --- | --- |
-| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - |
| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint.

@@ -245,7 +245,7 @@ The indexer repository provides an [example Grafana configuration](https://githu

The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file.

-Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs`
+Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs`

### Working with subgraphs

@@ -287,7 +287,7 @@ During indexing subgraphs might fail, if they encounter data that is unexpected,

In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required.

-> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository.
+> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository.

#### Block and call cache

From fe7feffeea80264fbfe87080d0199ac7891d1c72 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:33 -0500
Subject: [PATCH 0122/1534] New translations graph-node.mdx (Portuguese)

---
 .../pages/pt/indexing/tooling/graph-node.mdx  | 104 +++++++++---------
 1 file changed, 52 insertions(+), 52 deletions(-)

diff --git a/website/src/pages/pt/indexing/tooling/graph-node.mdx b/website/src/pages/pt/indexing/tooling/graph-node.mdx
index 38202b73da9c..f8520cff74e5 100644
--- a/website/src/pages/pt/indexing/tooling/graph-node.mdx
+++ b/website/src/pages/pt/indexing/tooling/graph-node.mdx
@@ -1,16 +1,16 @@
---
-title: Como Operar um Graph Node
+title: Graph Node
---

O Node do The Graph (Graph Node) é o componente que indexa subgraphs e disponibiliza os dados resultantes a queries (consultas de dados) através de uma API GraphQL. Assim, ele é central ao stack dos indexers, e é crucial fazer operações corretas com um node Graph para executar um indexer com êxito.

-Isto providencia um resumo contextual do Graph Node, e algumas das opções mais avançadas disponíveis para indexers. Mais instruções, e documentação, podem no [repositório do Graph Node](https://github.com/graphprotocol/graph-node). 
+Isto fornece um resumo contextual do Graph Node e algumas das opções mais avançadas disponíveis para indexadores. Para mais instruções e documentação, veja o [repositório do Graph Node](https://github.com/graphprotocol/graph-node). ## Graph Node O [Graph Node](https://github.com/graphprotocol/graph-node) é a implementação de referência para indexar Subgraphs na The Graph Network (rede do The Graph); fazer conexões com clientes de blockchain; indexar subgraphs; e disponibilizar dados indexados para queries. -O Graph Node (e todo o stack do indexador) pode ser executado em bare metal ou num ambiente na nuvem. Esta flexibilidade do componente central de indexing é crucial para a robustez do Protocolo The Graph. Da mesma forma, um Graph Node pode ser [construído do código fonte](https://github.com/graphprotocol/graph-node) ou os indexadores podem usar uma das [imagens disponíveis no Docker](https://hub.docker.com/r/graphprotocol/graph-node). +O Graph Node (e todo o stack dos indexadores) pode ser executado em um sistema bare-metal ou num ambiente na nuvem. Esta flexibilidade do componente central de indexing é importante para a robustez do Protocolo The Graph. Da mesma forma, um Graph Node pode ser [construído do código fonte](https://github.com/graphprotocol/graph-node) ou os indexadores podem usar uma das [imagens disponíveis no Docker](https://hub.docker.com/r/graphprotocol/graph-node). ### Banco de dados PostgreSQL @@ -20,9 +20,9 @@ O armazenamento principal para o Graph Node. É aqui que são guardados dados de Para indexar uma rede, o Graph Node precisa de acesso a um cliente de rede através de uma API JSON-RPC compatível com EVM. Esta RPC (chamada de processamento remoto) pode se conectar a um único cliente de Ethereum; ou o setup pode ser mais complexo, de modo a carregar saldos em múltiplos clientes. -Enquanto alguns subgraphs exigem apenas um node completo, alguns podem ter recursos de indexing que exijam funcionalidades adicionais de RPC. Especificamente, subgraphs que usam o `eth_calls` como parte do indexing exigirão um node de arquivo que apoie o [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898); e subgraphs com `callHandlers`, ou `blockHandlers` com um filtro `call`, exigem apoio ao `trace_filter` ([veja a documentação sobre o trace module (módulo de rastreio) aqui](https://openethereum.github.io/JSONRPC-trace-module)). +Enquanto alguns subgraphs exigem apenas um node completo, alguns podem ter recursos de indexação que precisem de funções adicionais de RPC (chamadas de procedimento remoto). Especificamente, subgraphs que usam o `eth_calls` como parte da indexação exigirão um node de arquivo que apoie o [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898); e subgraphs com `callHandlers`, ou `blockHandlers` com um filtro `call`, exigem apoio ao `trace_filter` (veja a documentação sobre o trace module (módulo de rastreio) [aqui](https://openethereum.github.io/JSONRPC-trace-module)). -**Firehoses de Rede** - um Firehose é um serviço gRPC que providencia uma transmissão ordenada, mas consciente de forks, de blocos, feito pelos programadores centrais do The Graph para melhorar o apoio a indexing eficiente em escala. Isto não é um requisito atual para Indexadores, mas vale os mesmos se familiarizarem com a tecnologia, antes do apoio total à rede. Leia mais sobre o Firehose [aqui](https://firehose.streamingfast.io/). 
+**Firehoses de Rede** - um Firehose é um serviço de gRPC (chamadas de procedimento remoto - Google) que fornece uma transmissão ordenada — mas consciente de forks — de blocos, feito pelos programadores centrais do The Graph para permitir indexação em escala mais eficiente. Isto não é um requisito atual para Indexadores, mas é ideal que os mesmos experimentem a tecnologia, antes do apoio total à rede. Leia mais sobre o Firehose [aqui](https://firehose.streamingfast.io/). ### Nodes IPFS @@ -32,9 +32,9 @@ Os metadados de lançamento de subgraph são armazenados na rede IPFS. O Graph N O Graph Node pode, opcionalmente, logar métricas a um servidor de métricas Prometheus para permitir funções de relatórios e monitorado. -### Começando da fonte +### Getting started from source -#### Pré-requisitos para a instalação +#### Install prerequisites - **Rust** @@ -42,7 +42,7 @@ O Graph Node pode, opcionalmente, logar métricas a um servidor de métricas Pro - **IPFS** -- **Requisitos Adicionais para utilizadores de Ubuntu** — A execução de um Graph Node no Ubuntu pode exigir pacotes adicionais. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config @@ -50,7 +50,7 @@ sudo apt-get install -y clang libpq-dev libssl-dev pkg-config #### Setup -1. Comece um servidor de banco de dados PostgreSQL +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone o repositório do [Graph Node](https://github.com/graphprotocol/graph-node) e construa a fonte executando o `cargo build` +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. Agora que todas as dependências estão prontas, inicie o Graph Node: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,21 +71,21 @@ cargo run -p graph-node --release -- \ ### Como começar com Kubernetes -Veja uma configuração de exemplo completa do Kubernetes no [repositório de indexer](https://github.com/graphprotocol/indexer/tree/main/k8s). +Veja um exemplo completo de configuração do Kubernetes no [repositório do indexer](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Os básicos do Kubernetes Durante a execução, o Graph Node expõe as seguintes portas: -| Porta | Propósito | Rotas | Argumento CLI | Variável de Ambiente | -| --- | --- | --- | --- | --- | -| 8000 | Servidor HTTP GraphQL
    (para queries de subgraph) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | WS GraphQL
    (para inscrições a subgraphs) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (para gerir lançamentos) | / | --admin-port | - | -| 8030 | API de status de indexamento do subgraph | /graphql | --index-node-port | - | -| 8040 | Métricas Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **Importante:** Cuidado ao expor portas publicamente; as **portas de administração** devem ser trancadas a sete chaves. Isto inclui o endpoint JSON-RPC do Graph Node. +> **Importante**: Cuidado ao expor portas publicamente — **portas de administração** devem ser trancadas a sete chaves. Isto inclui o endpoint JSON-RPC do Graph Node. ## Configurações avançadas do Graph Node @@ -99,22 +99,22 @@ Um arquivo de configuração [TOML](https://toml.io/en/) pode ser usado para faz > Ao usar um arquivo de configuração, não é possível usar as opções --postgres-url, --postgres-secondary-hosts, e --postgres-host-weights. -Um arquivo mínimo `config.toml` pode ser fornecido; o seguinte arquivo é o equivalente à opção de linha de comando --postgres-url: +É possível fornecer um arquivo mínimo `config.toml`; o seguinte arquivo é o equivalente à opção de linha de comando --postgres-url: ```toml [store] [store.primary] -connection="<.. postgres-url argument ..>" +connection="<.. argumento postgres-url ..>" [deployment] [[deployment.rule]] -indexers = [ "<.. list of all indexing nodes ..>" ] +indexers = [ "<.. lista de todos os nodes de indexação ..>" ] ``` -Veja a documentação completa do `config.toml` na documentação do [Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +A documentação completa do `config.toml` pode ser encontrada nos [documentos do Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### Múltiplos Graph Nodes -A indexação de Graph Nodes pode escalar horizontalmente, com a execução de várias instâncias de Graph Node para separar indexação de queries em nodes diferentes. Isto é facilmente possível com a execução de Graph Nodes, configurados com um `node_id` diferente na inicialização (por ex. no arquivo Docker Compose), que pode então ser usado no arquivo `config.toml` para especificar [nodes dedicados de consulta](#dedicated-query-nodes), [ingestores de blocos](#dedicated-block-ingestion) e separar subgraphs entre nódulos com [regras de lançamento](#deployment-rules). +A indexação de Graph Nodes pode ser escalada horizontalmente, com a execução de várias instâncias de Graph Node para separar indexação de queries em nodes diferentes. Isto é possível só com a execução de Graph Nodes, configurados com um `node_id` diferente na inicialização (por ex. no arquivo Docker Compose), que pode então ser usado no arquivo `config.toml` para especificar [nodes dedicados de query](#dedicated-query-nodes), [ingestores de blocos](#dedicated-block-ingestion") e separar subgraphs entre nódulos com [regras de lançamento](#deployment-rules). > Note que vários Graph Nodes podem ser configurados para usar o mesmo banco de dados — que, por conta própria, pode ser escalado horizontalmente através do sharding. @@ -150,7 +150,7 @@ indexers = [ ] ``` -Leia mais sobre as regras de lançamento [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Saiba mais sobre regras de lançamento [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). 
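Deployment rules are easy to get subtly wrong, so it can help to dry-run them before relying on them. A minimal sketch, assuming a `graphman` build that ships the `config place` helper and a `config.toml` in the working directory; the subgraph name and network below are placeholders:

```sh
# Sketch: ask graphman which shard/node the current deployment rules would
# pick for a hypothetical deployment (name and network are placeholders)
graphman --config config.toml config place example/subgraph mainnet
```

If the output names an unexpected shard or node, the rules can be adjusted before any subgraph is actually deployed.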
#### Nodes dedicados de query @@ -167,7 +167,7 @@ Qualquer node cujo --node-id combina com a expressão regular será programado p Para a maioria dos casos de uso, um único banco de dados Postgres é suficiente para apoiar uma instância de graph-node. Quando uma instância de graph-node cresce mais que um único banco Postgres, é possível dividir o armazenamento dos dados do graph-node entre múltiplos bancos Postgres. Todos os bancos de dados, juntos, formam o armazenamento da instância do graph-node. Cada banco de dados individual é chamado de shard. -Os shards são úteis para dividir lançamentos de subgraph em múltiplos bancos de dados, e podem também ser configurados a usarem réplicas para dividir a carga de query entre bancos de dados. Isto inclui a configuração do número de conexões disponíveis do banco que cada `graph-node` deve manter em seu pool de conexão para cada banco, o que fica cada vez mais importante conforme são indexados mais subgraphs. +Os shards servem para dividir lançamentos de subgraph em múltiplos bancos de dados, e podem também ser configurados para usar réplicas a fim de dividir a carga de query entre bancos de dados. Isto inclui a configuração do número de conexões disponíveis do banco que cada `graph-node` deve manter em seu pool de conexão para cada banco, o que fica cada vez mais importante conforme são indexados mais subgraphs. O sharding torna-se útil quando o seu banco de dados existente não aguenta o peso do Graph Node, e quando não é mais possível aumentar o tamanho do banco. @@ -175,7 +175,7 @@ O sharding torna-se útil quando o seu banco de dados existente não aguenta o p Em termos de configuração de conexões, comece com o max_connections no postgresql.conf configurado em 400 (ou talvez até 200) e preste atenção nas métricas do Prometheus store_connection_wait_time_ms e store_connection_checkout_count. Tempos de espera óbvios (acima de 5ms) indicam que há poucas conexões disponíveis; também podem ser causados por atividade excessiva no banco de dados (como uso alto de CPU). Mas caso o banco de dados pareça estável fora isto, os tempos de espera longos indicam uma necessidade de aumento no número de conexões. Na configuração, há um limite máximo de conexões que cada instância graph-node pode usar, e o Graph Node não manterá conexões abertas caso não sejam necessárias. -Leia mais sobre configuração de armazenamento [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +Veja mais sobre configurações de armazenamento [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### Ingestão dedicada de blocos @@ -188,7 +188,7 @@ ingestor = "block_ingestor_node" #### Apoio a múltiplas redes -O Graph Protocol só aumenta o número de redes com apoio a recompensas de indexing, e existem muitos subgraphs a indexarem redes não apoiadas que um indexer gostaria de processar. O arquivo `config.toml` permite a configuração expressiva e flexível de: +O Graph Protocol só aumenta o número de redes, com apoio a recompensas de indexação, e existem muitos subgraphs a indexarem redes não apoiadas que um indexador gostaria de processar. O arquivo config.toml permite a configuração expressiva e flexível de: - Múltiplas redes - Múltiplos provedores por rede (isto pode permitir a separação de peso entre eles, e pode permitir a configuração de nodes completos além de nodes de arquivo; o Graph Node prefere provedores mais baratos, caso permita uma carga de trabalho). 
@@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Leia mais sobre configuração de provedores [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Veja mais sobre configurações de provedor [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Variáveis de ambiente -O Graph Node apoia uma gama de variáveis de ambiente, que podem permitir recursos ou mudar o comportamento do Graph Node. Leia mais sobre as variáveis [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +O Graph Node apoia vários variáveis de ambiente que podem ativar funções ou mudar o seu comportamento. Mais informações [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### Lançamento contínuo Os utilizadores a operar um setup de indexing escalado, com configurações avançadas, podem ganhar mais ao gerir os seus Graph Nodes com o Kubernetes. -- O repositório de indexadores tem uma [referência de exemplo do Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) -- O [Launchpad](https://docs.graphops.xyz/launchpad/intro) é um conjunto de ferramentas para executar um Indexer do Graph Protocol no Kubernetes, mantido pelo GraphOps. Ele providencia um conjunto de charts Helm e uma CLI para gerir um lançamento de Graph Node. +- O repositório do indexer tem um [exemplo de referência de Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) +- O [Launchpad](https://docs.graphops.xyz/launchpad/intro) é um conjunto kit de ferramentas para executar um Indexer do Graph Protocol no Kubernetes, mantido pelo GraphOps. Ele fornece um conjunto de charts Helm e uma CLI para administrar um lançamento de Graph Node. ### Como gerir o Graph Node @@ -229,23 +229,23 @@ Dado um Graph Node (ou Nodes!) em execução, o desafio torna-se gerir subgraphs #### Logging -Os logs do Graph Node podem fornecer informações úteis, para o debug, e a otimização do Graph Node e de subgraphs específicos. O Graph Node apoia níveis diferentes de logs através da variável de ambiente `GRAPH_LOG`, com os seguintes níveis: error, warn, info, debug ou trace. +Os logs do Graph Node podem fornecer informações úteis, para debug e otimização — do Graph Node e de subgraphs específicos. O Graph Node apoia níveis diferentes de logs através da variável de ambiente `GRAPH_LOG`, com os seguintes níveis: `error`, `warn`, `info`, `debug` ou `trace`. -Além disto, configurar o `GRAPH_LOG_QUERY_TIMING` em `gql` fornece mais detalhes sobre o processo de queries no GraphQL (porém, isto causará um grande volume de logs). +Além disto, configurar o `GRAPH_LOG_QUERY_TIMING` para `gql` fornece mais detalhes sobre o processo de queries no GraphQL (porém, isto criará um grande volume de logs). -#### Monitorado & alerta +#### Monitoração e alertas Naturalmente, o Graph Node fornece as métricas através do endpoint Prometheus na porta 8040. Estas métricas podem ser visualizadas no Grafana. -O repositório do indexer providencia um [exemplo de configuração de Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +O repositório do indexer fornece um [exemplo de configuração do Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). 
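Even without Grafana, the raw endpoint can answer the connection-pool questions raised earlier in this guide. A minimal sketch, assuming the default metrics port 8040 mentioned above and the `store_connection_wait_time_ms` / `store_connection_checkout_count` metric names cited in the sharding discussion:

```sh
# Sketch: pull the two store-connection metrics discussed in the sharding
# section, to spot connection starvation (host and port assume a local node)
curl -s http://localhost:8040/metrics | grep -E 'store_connection_(wait_time_ms|checkout_count)'
```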
#### Graphman O `graphman` é uma ferramenta de manutenção para o Graph Node, que ajuda com o diagnóstico e a resolução de tarefas diferentes, sejam excecionais ou no dia a dia. -O comando graphman é incluído nos containers oficiais, e pode ser executado com o docker exec no seu container de graph-node. Ele exige um arquivo `config.toml`. +O comando `graphman` é incluído nos containers oficiais, e pode ser executado com o docker `exec` no seu container de `graph-node`. Ele exige um arquivo `config.toml`. -A documentação completa dos comandos do `graphman` está no repositório do Graph Node. Veja o \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) no `/docs` do Graph Node. +A documentação completa dos comandos do `graphman` está no repositório do Graph Node. Veja o [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) no `/docs` do Graph Node ### Como trabalhar com subgraphs @@ -253,7 +253,7 @@ A documentação completa dos comandos do `graphman` está no repositório do Gr Inicialmente disponível na porta 8030/graphql, a API de estado de indexação expõe uma gama de métodos para conferir o estado da indexação para subgraphs diferentes, conferir provas de indexação, inspecionar características de subgraphs, e mais. -O schema completo está disponível [aqui](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +Veja o schema completo [aqui](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Desempenho da indexação @@ -267,8 +267,8 @@ Estes estágios são segmentados (por ex., podem ser executados em paralelo), ma Causas comuns de lentidão na indexação: -- Tempo para encontrar eventos relevantes na chain (handlers de chamada em particular podem ser lentos, dada a dependência no `trace_filter`) -- Fazer uma grande quantidade de `eth_calls` como parte de handlers +- Tempo para encontrar eventos relevantes na chain (handlers de chamada em particular podem demorar mais, dada a dependência no `trace_filter`) +- Fazer muitos `eth_calls` como parte de handlers - Excesso de interações no armazenamento durante a execução - Muitos dados para guardar no armazenamento - Muitos eventos a serem processados @@ -291,15 +291,15 @@ Em alguns casos, uma falha pode ser resolvida pelo indexador (por ex. a indexaç #### Cache de blocos e chamadas -O Graph Node cacheia certos dados no armazenamento para poupar um refetching do provedor. São cacheados não são blocos, como também os resultados do `eth_calls` (este último, cacheado a partir de um bloco específico). Este caching pode aumentar dramaticamente a velocidade de indexação durante a "ressincronização" de um subgraph levemente alterado. +O Graph Node cacheia certos dados no armazenamento para poupar um refetching do provedor. São cacheados os blocos e os resultados do `eth_calls` (este último, cacheado a partir de um bloco específico). Este caching pode aumentar dramaticamente a velocidade de indexação durante a "ressincronização" de um subgraph levemente alterado. -Porém, em algumas instâncias, se um node Ethereum tiver fornecido dados incorretos em algum período, isto pode entrar no cache, o que causa dados incorretos ou subgraphs falhos. Neste caso, os indexadores podem usar o `graphman` para limpar o cache envenenado e rebobinar os subgraphs afetados, que retirarão dados frescos do provedor saudável (idealmente). 
+Porém, em algumas instâncias, se um node Ethereum tiver fornecido dados incorretos em algum período, isto pode entrar no cache, o que causa dados incorretos ou subgraphs falhos. Neste caso, os indexadores podem usar o `graphman` para limpar o cache envenenado e rebobinar os subgraphs afetados, que retirarão dados frescos do provedor (idealmente) saudável. -Caso haja uma suspeita de inconsistência no cache de blocos, como a falta de um evento `tx receipt missing`: +Caso haja uma suspeita de inconsistência no cache de blocos, como a falta de um evento tx receipt missing: -1. `graphman chain list` para achar o nome da chain. -2. `graphman chain check-blocks by-number ` confere se o bloco no cache corresponde ao provedor, e apaga o bloco do cache se não for o caso. - 1. Caso haja uma diferença, pode ser mais seguro truncar o cache inteiro com `graphman chain truncate `. +1. Digite `graphman chain list` para buscar o nome da chain. +2. `graphman chain check-blocks by-number ` verificará se o bloco no cache corresponde ao provedor; se não for o caso, o bloco será apagado do cache. + 1. Se houver uma diferença, pode ser mais seguro truncar o cache inteiro com `graphman chain truncate `. 2. Caso o bloco corresponda ao provedor, então o problema pode ser debugado em frente ao provedor. #### Erros e problemas de query @@ -312,7 +312,7 @@ Não há uma "bala de prata", mas sim uma gama de ferramentas para prevenir, dia ##### Caching de query -O Graph Node naturalmente cacheia queries no GraphQL, o que pode reduzir muito a carga no banco de dados. Isto pode ser configurado mais profundamente com `GRAPH_QUERY_CACHE_BLOCKS` e `GRAPH_QUERY_CACHE_MAX_MEM` - leia mais [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +O Graph Node naturalmente cacheia queries no GraphQL, o que pode reduzir muito a carga no banco de dados. Isto pode ser configurado mais profundamente com `GRAPH_QUERY_CACHE_BLOCKS` e `GRAPH_QUERY_CACHE_MAX_MEM` — leia mais [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### Análise de queries @@ -334,12 +334,12 @@ O comando `graphman stats show mostra, para cada tipo/tábua de entid Em geral, tábuas em que o número de entidades distintas é menos de 1% do total de linhas/versões de entidade são boas candidatas para a otimização tipo-conta. Quando o resultado do `graphman stats show` indica que uma tábua pode se beneficiar desta otimização, ativar o `graphman stats show
    ` fará uma contagem completa da tábua - que pode ser lenta, mas dá uma medida precisa da proporção entre entidades distintas e total de versões. -Quando uma tábua for determinada como "tipo-conta", executar o `graphman stats account-like .
    ` ativará a otimização tipo-conta para queries frente àquela tábua. A otimização pode ser desativada novamente com `graphman stats account-like --clear .
    `. Os nodes de consulta levam até 5 minutos para perceber que a otimização foi ligada ou desligada. Após ativar a otimização, verifique se a mudança não retarda consultas para aquela tábua. Caso tenha configurado o Grafana para monitorar o Postgres, muitos queries lentos podem aparecer no `pg_stat_activity`, com demora de vários segundos. Neste caso, a otimização precisa ser desativada novamente. +Quando uma tábua for determinada como "tipo-conta", executar o `graphman stats account-like .
    ` ativará a otimização tipo-conta para queries frente àquela tábua. A otimização pode ser desativada novamente com `graphman stats account-like --clear .
    `. Os nodes de consulta levam até 5 minutos para perceber que a otimização foi ligada ou desligada. Após ativar a otimização, verifique se a mudança não desacelera os queries para aquela tábua. Caso tenha configurado o Grafana para monitorar o Postgres, muitos queries lentos podem aparecer no `pg_stat_activity`, com demora de vários segundos. Neste caso, a otimização precisa ser desativada novamente. -Para subgraphs parecidos com o Uniswap, as tábuas `pair` e `token` são ótimas candidatas para esta otimização, e podem ter efeitos surpreendentes na carga do banco de dados. +Para subgraphs parecidos com o Uniswap, as tábuas `pair` e `token` são ótimas para esta otimização, e podem ter efeitos surpreendentes na carga do banco de dados. #### Como remover subgraphs > Esta é uma funcionalidade nova, que estará disponível no Graph Node 0.29.x -Em certo ponto, o indexador pode querer remover um subgraph dado. É só usar o `graphman drop`, que apaga um lançamento e todos os seus dados indexados. O lançamento pode ser especificado como o nome de um subgraph, um hash IPFS `Qm..`, ou o namespace de banco de dados `sgdNNN`. Mais documentos sobre o processo [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +Em certo ponto, o indexador pode querer remover um subgraph. É só usar o `graphman drop`, que apaga um lançamento e todos os seus dados indexados. O lançamento pode ser especificado como o nome de um subgraph, um hash IPFS `Qm..`, ou o namespace de banco de dados `sgdNNN`. Mais documentos sobre o processo [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 143a23f970009feb8c2e567cc525ee4abcd55597 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:34 -0500 Subject: [PATCH 0123/1534] New translations graph-node.mdx (Russian) --- .../pages/ru/indexing/tooling/graph-node.mdx | 120 +++++++++--------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/website/src/pages/ru/indexing/tooling/graph-node.mdx b/website/src/pages/ru/indexing/tooling/graph-node.mdx index dad9f543237a..500a21a3d5f1 100644 --- a/website/src/pages/ru/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ru/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: Эксплуатация Graph Node +title: Graph Node --- Graph Node — это компонент, который индексирует подграфы и делает полученные данные доступными для запроса через GraphQL API. Таким образом, он занимает центральное место в стеке индексатора, а правильная работа Graph Node имеет решающее значение для успешного запуска индексатора. -Здесь представлен контекстуальный обзор Graph Node и некоторые более продвинутые параметры, доступные индексаторам. Подробную документацию и инструкции можно найти в [репозитории Graph Node](https://github.com/graphprotocol/graph-node). +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) — это эталонная реализация для индексации подграфов в The Graph Network, подключения к клиентам блокчейна, индексирования подграфов и предоставления индексированных данных для запроса. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -Graph Node (и весь стек индексаторов) можно запускать на «голом железе» или в облачной среде. Эта гибкость центрального компонента индексации имеет решающее значение для надежности The Graph Protocol. Точно так же Graph Node может быть [создана из исходного кода](https://github.com/graphprotocol/graph-node), или индексаторы могут использовать один из [, предусмотренных Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### База данных PostgreSQL @@ -20,7 +20,7 @@ Graph Node (и весь стек индексаторов) можно запус Для индексации сети Graph Node требуется доступ к сетевому клиенту через EVM-совместимый JSON-RPC API. Этот RPC может подключаться к одному клиенту или может представлять собой более сложную настройку, которая распределяет нагрузку между несколькими. -В то время как для некоторых субграфов может потребоваться полная нода, другие могут иметь функции индексации, для которых требуются дополнительные функции RPC. В частности, для субграфов, которые выполняют `eth_calls` как часть индексации, потребуется нода архива, поддерживающая [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), а субграфы с `callHandlers` или `blockHandlers` с фильтром `call` требуют поддержки `trace_filter` ([см. документацию по модулю трассировки здесь](https://openethereum.github.io/JSONRPC-trace-module)). +While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). @@ -32,9 +32,9 @@ Graph Node (и весь стек индексаторов) можно запус Чтобы включить мониторинг и отчетность, Graph Node может дополнительно регистрировать метрики на сервере метрик Prometheus. -### Начало работы с исходным кодом +### Getting started from source -#### Установка предварительного обеспечения +#### Install prerequisites - **Rust** @@ -42,15 +42,15 @@ Graph Node (и весь стек индексаторов) можно запус - **IPFS** -- **Дополнительные требования для пользователей Ubuntu**. Для запуска Graph Node в Ubuntu может потребоваться несколько дополнительных пакетов. 
+- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed.

```sh
sudo apt-get install -y clang libpq-dev libssl-dev pkg-config
```

-#### Настройка
+#### Setup

-1. Запустите сервер базы данных PostgreSQL
+1. Start a PostgreSQL database server

```sh
initdb -D .postgres
pg_ctl -D .postgres -l logfile start
createdb graph-node
```

-2. Клонируйте репозиторий [Graph Node](https://github.com/graphprotocol/graph-node) и соберите исходный код, запустив `cargo build`
+2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build`

-3. Теперь, когда все зависимости настроены, запустите Graph Node:
+3. Now that all the dependencies are setup, start the Graph Node:

```sh
cargo run -p graph-node --release -- \
  --postgres-url postgresql://[USERNAME[:PASSWORD]]@localhost:5432/graph-node \
  --ethereum-rpc [NETWORK_NAME]:[URL] \
  --ipfs 127.0.0.1:5001
```

### Начало работы с Kubernetes

-Полный пример конфигурации Kubernetes можно найти в [репозитории индексатора](https://github.com/graphprotocol/indexer/tree/main/k8s).
+A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s).

### Порты

Во время работы Graph Node предоставляет следующие порты:

-| Порт | Назначение | Расположение | CLI-аргумент | Переменная среды |
-| --- | --- | --- | --- | --- |
-| 8000 | HTTP-сервер GraphQL<br />(для запросов подграфов) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
-| 8001 | GraphQL WS<br />(для подписок на подграфы) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
-| 8020 | JSON-RPC<br />(для управления процессом развертывания) | / | --admin-port | - |
-| 8030 | API для определения статуса индексирования подграфов | /graphql | --index-node-port | - |
-| 8040 | Показатели Prometheus | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC<br />(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

-> **Важно**. Будьте осторожны, открывая порты для общего доступа — **порты администрирования** должны быть заблокированы. Это касается конечных точек Graph Node JSON-RPC.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint.

## Расширенная настройка Graph Node

На простейшем уровне Graph Node может работать с одним экземпляром Graph Node, одной базой данных PostgreSQL, нодой IPFS и сетевыми клиентами в соответствии с требованиями субграфов, подлежащих индексированию.

-Эту настройку можно масштабировать горизонтально, добавляя несколько Graph Node и несколько баз данных для поддержки этих Graph Node. Опытные пользователи могут воспользоваться некоторыми возможностями горизонтального масштабирования Graph Node, а также некоторыми более продвинутыми параметрами конфигурации через файл `config.toml` и переменные среды Graph Node.
+This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables.

### `config.toml`

-Файл конфигурации [TOML](https://toml.io/en/) можно использовать для установки более сложных конфигураций, чем те, которые представлены в интерфейсе командной строки. Местоположение файла передается с помощью параметра командной строки --config.
+A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch.

> При использовании файла конфигурации невозможно использовать параметры --postgres-url, --postgres-secondary-hosts и --postgres-host-weights.

-Можно предоставить минимальный файл `config.toml`; следующий файл эквивалентен использованию опции командной строки --postgres-url:
+A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option:

```toml
[store]
[store.primary]
connection="<.. postgres-url argument ..>"
[deployment]
[[deployment.rule]]
indexers = [ "<.. list of all indexing nodes ..>" ]
```

-Полную документацию по `config.toml` можно найти в [документах Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md).
+Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md).

#### Множественные Graph Node

Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules).

#### Правила развертывания

-При наличии нескольких Graph нод необходимо управлять развертыванием новых подграфов таким образом, чтобы один и тот же подграф не индексировался двумя разными нодами, что могло бы привести к конфликтам. Это можно сделать с помощью правил развертывания, которые также могут указывать, в каком `shard` должны храниться данные подграфа, если используется сегментирование базы данных. Правила развертывания могут сочетать имена подграфа и сети, которую индексирует развертывание, чтобы принять решение.
+Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. Пример настройки правил развертывания: @@ -150,7 +150,7 @@ indexers = [ ] ``` -Подробнее о правилах развертывания читайте [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). #### Выделенные ноды запросов @@ -167,7 +167,7 @@ query = "" В большинстве случаев одной базы данных Postgres достаточно для поддержки отдельной Graph Node. Когда отдельная Graph Node перерастает одну базу данных Postgres, можно разделить хранилище данных Graph Node между несколькими базами данных Postgres. Все базы данных вместе образуют хранилище отдельной Graph Node. Каждая отдельная база данных называется шардом (сегментом). -Сегменты можно использовать для разделения развертываний подграфов между несколькими базами данных, а также для использования копий с целью распределения нагрузки запросов между базами данных. Это включает в себя настройку количества доступных подключений к базе данных, которые каждый `graph-node` должен хранить в своем пуле подключений для каждой базы данных. Это становится все более важным по мере индексации большего количества подграфов. +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. Сегментирование становится полезным, когда Ваша существующая база данных не может справиться с нагрузкой, которую на нее возлагает Graph Node, и когда больше невозможно увеличить размер базы данных. @@ -175,11 +175,11 @@ query = "" Что касается настройки соединений, начните с max_connections в postgresql.conf, установленного на 400 (или, может быть, даже на 200), и посмотрите на метрики store_connection_wait_time_ms и store_connection_checkout_count Prometheus. Длительное время ожидания (все, что превышает 5 мс) является признаком того, что доступных соединений слишком мало; большое время ожидания также будет вызвано тем, что база данных очень загружена (например, высокая загрузка ЦП). Однако, если в остальном база данных кажется стабильной, большое время ожидания указывает на необходимость увеличения количества подключений. В конфигурации количество подключений, которое может использовать каждая отдельная Graph Node, является верхним пределом, и Graph Node не будет держать соединения открытыми, если они ей не нужны. -Подробнее о настройке хранилища читайте [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). 
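As an illustrative aside (this sketch is not part of the patch above), a store that actually uses a second shard could be declared roughly as follows in `config.toml`. The shard name `vip`, the connection strings, and the pool sizes are hypothetical; only the `primary` shard is required by Graph Node:

```toml
[store]
# The primary shard is mandatory: it holds metadata plus any subgraph
# deployments that are not explicitly routed elsewhere.
[store.primary]
connection = "postgresql://graph:secret@primary-db:5432/graph-node"
pool_size = 400

# A hypothetical second shard for high-traffic subgraphs, referenced from
# deployment rules via `shard = "vip"`.
[store.vip]
connection = "postgresql://graph:secret@vip-db:5432/graph-node"
pool_size = 50
```

Here `pool_size` is the per-shard connection-pool limit discussed above; each `graph-node` instance keeps at most that many connections open to the given database.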
#### Прием выделенного блока -Если настроено несколько нод, необходимо выделить одну, которая будет отвечать за прием новых блоков, чтобы все сконфигурированные ноды индекса не опрашивали головную часть чейна. Это делается как часть пространства имен `chains`, в котором `node_id`, будет использоваться для приема блоков: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### Поддержка нескольких сетей -Протокол The Graph увеличивает количество сетей, поддерживаемых для индексации вознаграждений, и существует множество подграфов, индексирующих неподдерживаемые сети, которые индексатор хотел бы обработать. Файл `config.toml` обеспечивает ярко выраженную и гибкую настройку: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Несколько сетей - Несколько провайдеров на сеть (это может позволить разделить нагрузку между провайдерами, а также может позволить настроить полные ноды, а также архивные ноды, при этом Graph Node предпочитает более дешевых поставщиков, если позволяет данная рабочая нагрузка). - Дополнительные сведения о провайдере, такие как функции, аутентификация и тип провайдера (для экспериментальной поддержки Firehose) -Раздел `[chains]` управляет провайдерами Ethereum, к которым подключается graph-node, и где хранятся блоки и другие метаданные для каждого чейна. В следующем примере настраиваются два чейна, mainnet и kovan, где блоки для mainnet хранятся в сегменте vip, а блоки для kovan — в основном сегменте. Чейн основной сети может использовать двух разных провайдеров, тогда как у kovan есть только один провайдер. +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Подробнее о настройке провайдера читайте [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Переменные среды -Graph Node поддерживает ряд переменных среды, которые могут включать функции или изменять поведение Graph Node. Они описаны [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). 
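For illustration only (this snippet is not from the patch), two of the variables referenced later in this document could be exported in the node's environment before launch; the exact values shown are assumptions:

```sh
# Raise Graph Node's log verbosity (valid levels: error, warn, info, debug, trace).
export GRAPH_LOG=debug

# Log GraphQL query timing details; useful for debugging, but verbose.
export GRAPH_LOG_QUERY_TIMING=gql

# Start the node with a config file rather than individual CLI flags.
cargo run -p graph-node --release -- --config config.toml
```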
### Непрерывное развертывание Пользователи, использующие масштабируемую настройку индексирования с расширенной конфигурацией, могут получить преимущество от управления своими узлами Graph с помощью Kubernetes. -- В репозитории индексатора есть [пример справочника по Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) – это набор инструментов для запуска Graph Protocol индексатора в Kubernetes, поддерживаемый GraphOps. Он предоставляет набор диаграмм Helm и интерфейс командной строки для управления развертыванием Graph Node. +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### Управление Graph Node @@ -229,23 +229,23 @@ Graph Node поддерживает ряд переменных среды, ко #### Логирование (ведение журналов) -Логи Graph Node могут предоставить полезную информацию для отладки и оптимизации Graph Node и конкретных подграфов. Graph Node поддерживает различные уровни логов с помощью переменной среды `GRAPH_LOG` со следующими уровнями: ошибка, предупреждение, информация, отладка или трассировка. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -Кроме того, установка для `GRAPH_LOG_QUERY_TIMING` значения `gql` предоставляет дополнительные сведения о том, как выполняются запросы GraphQL (хотя это приводит к созданию большого объема логов). +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### Мониторинг и оповещения +#### Monitoring & alerting Graph Node предоставляет метрики через конечную точку Prometheus на порту 8040 по умолчанию. Затем можно использовать Grafana для визуализации этих метрик. -В репозитории индексатора представлен [пример конфигурации Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman` – это инструмент обслуживания Graph Node, помогающий диагностировать и решать различные повседневные и исключительные задачи. +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -Команда graphman включена в официальные контейнеры, и Вы можете выполнить docker exec в контейнере graph-node, чтобы запустить ее. Для этого требуется файл `config.toml`. +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Полная документация по командам `graphman` доступна в репозитории Graph Node. См. \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) в Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. 
See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Работа с подграфами @@ -253,7 +253,7 @@ Graph Node предоставляет метрики через конечную Доступный по умолчанию на порту 8030/graphql, API статуса индексирования предоставляет ряд методов для проверки статуса индексирования для различных подграфов, проверки доказательств индексирования, проверки функций подграфов и многого другого. -Полная схема доступна [здесь](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Производительность индексирования @@ -267,8 +267,8 @@ Graph Node предоставляет метрики через конечную Распространенные причины низкой скорости индексации: -- Время, затрачиваемое на поиск соответствующих событий в чейне (в частности, обработчики вызовов могут работать медленно, учитывая зависимость от `trace_filter`) -- Создание большого количества `eth_call` в составе обработчиков +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers - Большое количество операций с хранилищем во время выполнения - Большой объем данных для сохранения в хранилище - Большое количество событий для обработки @@ -287,19 +287,19 @@ Graph Node предоставляет метрики через конечную В некоторых случаях сбой может быть устранен индексатором (например, если ошибка вызвана отсутствием нужного поставщика, добавление необходимого поставщика позволит продолжить индексирование). Однако в других случаях требуется изменить код подграфа. -> Детерминированные сбои считаются «окончательными», при этом для отказавшего блока генерируется Доказательство индексации, а недетерминированные сбои — нет, поскольку подграфу может удаться «отменить сбой» и продолжить индексирование. В некоторых случаях недетерминированная метка неверна, и подграф никогда не преодолеет ошибку; о таких сбоях следует сообщать как о проблемах в репозитории Graph Node. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Кэш блокировки и вызова -Graph Node кэширует определенные данные в хранилище, чтобы избежать повторной загрузки от провайдера. Блоки кэшируются, как и результаты `eth_calls` (последние кэшируются для определенного блока). Такое кэширование может резко увеличить скорость индексации при «повторной синхронизации» слегка измененного подграфа. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. -Однако, в некоторых случаях, если нода Ethereum предоставила неверные данные за какой-то период, они могут попасть в кеш, что приведет к некорректным данным или повреждённым субграфам. 
В этом случае индексаторы могут использовать `graphman` для очистки испорченного кеша, а затем перематывать затронутые субграфы, которые затем будут получать свежие данные от (мы надеемся на это) исправного поставщика. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Если есть подозрение на несогласованность кэша блоков, например, событие отсутствия квитанции tx: -1. `graphman chain list`, чтобы найти название чейна. -2. `graphman chain check-blocks by-number ` проверит, соответствует ли кэшированный блок провайдеру, и удалит блок из кэша, если это не так. - 1. Если есть разница, может быть безопаснее усечь весь кеш с помощью `graphman chain truncate `. +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. Если блок соответствует провайдеру, то проблема может быть отлажена непосредственно провайдером. #### Запрос проблем и ошибок @@ -312,7 +312,7 @@ Graph Node кэширует определенные данные в храни ##### Кэширование запросов -Graph Node по умолчанию кэширует запросы GraphQL, что может значительно снизить нагрузку на базу данных. Это можно дополнительно настроить с помощью параметров `GRAPH_QUERY_CACHE_BLOCKS` и `GRAPH_QUERY_CACHE_MAX_MEM` — подробнее [здесь](https://github.com/graphprotocol/graph-node/blob/master. /docs/environment-variables.md#graphql-caching). +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### Анализ запросов @@ -320,7 +320,7 @@ Graph Node по умолчанию кэширует запросы GraphQL, чт В других случаях триггером может быть высокий уровень использования памяти на ноде запроса, и в этом случае сначала нужно определить запрос, вызвавший проблему. -Индексаторы могут использовать [qlog](https://github.com/graphprotocol/qlog/) для обработки и обобщения логов запросов Graph Node. Также можно включить `GRAPH_LOG_QUERY_TIMING` для выявления и отладки медленных запросов. +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. При медленном запросе у индексаторов есть несколько вариантов. Разумеется, они могут изменить свою модель затрат, чтобы значительно увеличить стоимость отправки проблемного запроса. Это может привести к снижению частоты этого запроса. Однако это часто не устраняет основной причины проблемы. @@ -328,18 +328,18 @@ Graph Node по умолчанию кэширует запросы GraphQL, чт Таблицы базы данных, в которых хранятся объекты, как правило, бывают двух видов: «подобные транзакциям», когда объекты, однажды созданные, никогда не обновляются, т. е. они хранят что-то вроде списка финансовых транзакций и «подобные учетной записи», где объекты обновляются очень часто, т. е. 
они хранят что-то вроде финансовых счетов, которые изменяются каждый раз при записи транзакции. Таблицы, подобные учетным записям, характеризуются тем, что они содержат большое количество версий объектов, но относительно мало отдельных объектов. Часто в таких таблицах количество отдельных объектов составляет 1% от общего количества строк (версий объектов)

-Для таблиц, подобных учетным записям, `graph-node` может генерировать запросы, в которых используются детали того, как Postgres в конечном итоге сохраняет данные с такой высокой скоростью изменения, а именно, что все версии последних блоков находятся в небольшом подразделе общего хранилища для такой таблицы.
+For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table.

-Команда `graphman stats show <table>` показывает для каждого типа/таблицы объектов в развертывании, сколько различных объектов и сколько версий объектов содержит каждая таблица. Эти данные основаны на внутренних оценках Postgres и, следовательно, неточны и могут отличаться на порядок. `-1` в столбце `entities` означает, что Postgres считает, что все строки содержат отдельный объект.
+The command `graphman stats show <table>` shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity.

-Как правило, таблицы, в которых количество отдельных объектов составляет менее 1 % от общего количества версий строк/объектов, являются хорошими кандидатами на оптимизацию по аналогии с учетными записями. Если выходные данные `graphman stats show` указывают на то, что эта оптимизация может принести пользу таблице, запуск `graphman stats show <table>` произведёт полный расчет таблицы. Этот процесс может быть медленным, но обеспечит точную степень соотношения отдельных объектов к общему количеству версий объекта.
+In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show <table>` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions.

-Как только таблица будет определена как учетная запись, запуск `graphman stats account-like <sgdNNN>.<table>`, включит оптимизацию, подобную учетной записи, для запросов к этой таблице. Оптимизацию можно снова отключить с помощью `graphman stats account-like --clear <sgdNNN>.<table>`. Нодам запроса требуется до 5 минут, чтобы заметить, что оптимизация включена или выключена. После включения оптимизации необходимо убедиться, что изменение фактически не приводит к замедлению запросов к этой таблице. Если Вы настроили Grafana для мониторинга Postgres, медленные запросы будут отображаться в `pg_stat_activity` в больших количествах, это займет несколько секунд. В этом случае оптимизацию необходимо снова отключить.
+Once a table has been determined to be account-like, running `graphman stats account-like <sgdNNN>.<table>
    ` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -Для подграфов, подобных Uniswap, таблицы `pair` и `token` являются первыми кандидатами на эту оптимизацию и могут существенно повлиять на нагрузку базы данных. +For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. #### Удаление подграфов > Это новый функционал, который будет доступен в Graph Node 0.29.x -В какой-то момент индексатору может потребоваться удалить данный подграф. Это можно легко сделать с помощью `graphman drop`, который удаляет развертывание и все его проиндексированные данные. Развертывание может быть задано как имя подграфа, хэш IPFS `Qm..` или пространство имен базы данных `sgdNNN`. Дополнительную документацию можно найти [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 192aad41fc572be2f5f1129f07271e54acbfe8c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:35 -0500 Subject: [PATCH 0124/1534] New translations graph-node.mdx (Swedish) --- .../pages/sv/indexing/tooling/graph-node.mdx | 120 +++++++++--------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/website/src/pages/sv/indexing/tooling/graph-node.mdx b/website/src/pages/sv/indexing/tooling/graph-node.mdx index 65b506e574dd..55d1133f5e5e 100644 --- a/website/src/pages/sv/indexing/tooling/graph-node.mdx +++ b/website/src/pages/sv/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: Drift av Graf Node +title: Graf Node --- Graf Node är komponenten som indexerar subgraffar och gör den resulterande datan tillgänglig för förfrågan via en GraphQL API. Som sådan är den central för indexeringsstacken, och korrekt drift av Graph Node är avgörande för att driva en framgångsrik indexerare. -Detta ger en kontextuell översikt över Graph Node och några av de mer avancerade alternativ som är tillgängliga för indexerare. Detaljerad dokumentation och instruktioner finns i [Graph Node-repositoriet](https://github.com/graphprotocol/graph-node). +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## Graf Node -[Graf Node](https://github.com/graphprotocol/graph-node) är referensimplementationen för indexeringsavsnitt på The Graph Nätverk, som ansluter till blockchain-klienter, indexerar subgraffar och gör indexerad data tillgänglig för förfrågan. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query.

-Graf Node (och hela indexeringsstacken) kan köras på rå metall eller i en molnmiljö. Flexibiliteten hos den centrala indexeringskomponenten är avgörande för robustheten i The Graph Protocol. På samma sätt kan Graph Node [byggas från källan](https://github.com/graphprotocol/graph-node) eller indexerare kan använda en av de [medföljande Docker-bilderna](https://hub.docker.com/r/graphprotocol/graph-node).
+Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node).

### PostgreSQL-databas

Huvudlagret för Graph Node, här lagras subgrafdata, liksom metadata om subgraf, och subgraf-oberoende nätverksdata som blockcache och eth_call-cache.

För att indexera ett nätverk behöver Graf Node åtkomst till en nätverksklient via ett EVM-kompatibelt JSON-RPC API. Denna RPC kan ansluta till en enda klient eller så kan det vara en mer komplex konfiguration som lastbalanserar över flera.

-Medan vissa subgrafer kan kräva en fullständig nod, kan vissa ha indexeringsfunktioner som kräver ytterligare RPC-funktionalitet. Specifikt subgrafer som gör `eth_calls` som en del av indexering kommer att kräva en arkivnod som stöder [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), och subgrafer med `callHandlers` eller `blockHandlers` med en `call`-filtrering kräver `trace_filter`-stöd ([se trace-modulens dokumentation här](https://openethereum.github.io/JSONRPC-trace-module)).
+While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)).

**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/).

Metadata för distribution av subgraffar lagras på IPFS-nätverket. Graf Node har främst åtkomst till IPFS-noden under subgrafdistributionen för att hämta subgrafmanifestet och alla länkade filer.

För att möjliggöra övervakning och rapportering kan Graf Node valfritt logga metrik till en Prometheus-metrisk server.

-### Komma igång från källan
+### Getting started from source

-#### Installera förutsättningar
+#### Install prerequisites

- **Rust**

- **PostgreSQL**

- **IPFS**

-- **Ytterligare krav för Ubuntu-användare** - För att köra en Graf Node på Ubuntu kan några ytterligare paket behövas.
+- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed.

```sh
sudo apt-get install -y clang libpq-dev libssl-dev pkg-config
```

-#### Inställning
+#### Setup

-1. Starta en PostgreSQL-databasserver
+1. Start a PostgreSQL database server

```sh
initdb -D .postgres
pg_ctl -D .postgres -l logfile start
createdb graph-node
```

-2. Klona [Graf Node](https://github.com/graphprotocol/graph-node)-repon och bygg källkoden genom att köra `cargo build`
+2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build`

-3. Nu när alla beroenden är konfigurerade startar du Graf Node:
+3. Now that all the dependencies are setup, start the Graph Node:

```sh
cargo run -p graph-node --release -- \
  --postgres-url postgresql://[USERNAME[:PASSWORD]]@localhost:5432/graph-node \
  --ethereum-rpc [NETWORK_NAME]:[URL] \
  --ipfs 127.0.0.1:5001
```

### Komma igång med Kubernetes

-En komplett exempelkonfiguration för Kubernetes finns i [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s).
+A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s).

### Portar

När Graph Node är igång exponerar den följande portar:

-| Port | Syfte | Rutter | Argument för CLI | Miljö Variabel |
-| --- | --- | --- | --- | --- |
-| 8000 | GraphQL HTTP-server<br />(för frågor om undergrafer) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
-| 8001 | GraphQL WS<br />(för prenumerationer på undergrafer) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
-| 8020 | JSON-RPC<br />(för hantering av distributioner) | / | --admin-port | - |
-| 8030 | Status för indexering av undergrafer API | /graphql | --index-node-port | - |
-| 8040 | Prometheus mätvärden | /metrics | --metrics-port | - |
+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC<br />(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

-> **Viktigt**: Var försiktig med att exponera portar offentligt - **administrationsportar** bör hållas säkra. Detta inkluderar JSON-RPC-slutpunkten för Graph Node.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint.

## Avancerad konfiguration av Graf Node

På sitt enklaste sätt kan Graph Node användas med en enda instans av Graph Node, en enda PostgreSQL-databas, en IPFS-nod och nätverksklienter som krävs av de subgrafer som ska indexeras.

-Denna konfiguration kan skalas horisontellt, genom att lägga till flera Graph Nodes och flera databaser för att stödja dessa Graph Nodes. Avancerade användare kan vilja dra nytta av vissa av de horisontella skalningsfunktionerna i Graph Node, liksom några av de mer avancerade konfigurationsalternativen via filen `config.toml` och Graph Nodes miljövariabler.
+This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables.

### `config.toml`

-En [TOML](https://toml.io/en/) konfigurationsfil kan användas för att ställa in mer komplexa konfigurationer än de som exponeras i CLI. Platsen för filen överförs med kommandoradsomkopplaren --config.
+A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch.

> När du använder en konfigurationsfil är det inte möjligt att använda alternativen --postgres-url, --postgres-secondary-hosts och --postgres-host-weights.

-En minimal `config.toml`-fil kan tillhandahållas; följande fil är ekvivalent med att använda kommandoradsalternativet --postgres-url:
+A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option:

```toml
[store]
[store.primary]
connection="<.. postgres-url argument ..>"
[deployment]
[[deployment.rule]]
indexers = [ "<.. list of all indexing nodes ..>" ]
```

-Fullständig dokumentation av `config.toml` hittar du i [Graph Node-dokumentationen](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md).
+Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md).

#### Flera Grafnoder

Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules).

#### Regler för utplacering

-Med flera Graph Nodes är det nödvändigt att hantera deployering av nya subgrafer så att samma subgraf inte indexeras av två olika noder, vilket skulle leda till kollisioner. Detta kan göras genom att använda deployeringsregler, som också kan specificera vilken `shard` subgrafens data ska lagras i om databasens sharding används. Deployeringsregler kan matcha subgrafens namn och nätverket som deployeringen indexerar för att fatta ett beslut.
+Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions.
This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. Exempel på konfiguration av deployeringsregler: @@ -150,7 +150,7 @@ indexers = [ ] ``` -Läs mer om implementeringsregler [här](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). #### Dedikerade frågenoder @@ -167,7 +167,7 @@ Alla noder vars --node-id matchar reguljärt uttryck kommer att konfigureras fö För de flesta användningsfall är en enda Postgres-databas tillräcklig för att stödja en graph-node-instans. När en graph-node-instans växer utöver en enda Postgres-databas är det möjligt att dela upp lagringen av graph-node-data över flera Postgres-databaser. Alla databaser tillsammans bildar lagringsutrymmet för graph-node-instansen. Varje individuell databas kallas en shard. -Shards kan användas för att dela upp subgraffsdeployeringar över flera databaser och kan också användas för att använda kopior för att sprida frågebelastningen över databaser. Detta inkluderar konfigurering av antalet tillgängliga databasanslutningar som varje `graph-node` bör behålla i sin anslutningspool för varje databas, vilket blir allt viktigare när fler subgrafer blir indexerade. +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. Sharding blir användbart när din befintliga databas inte kan hålla jämna steg med belastningen som Graph Node sätter på den och när det inte längre är möjligt att öka databasens storlek. @@ -175,11 +175,11 @@ Sharding blir användbart när din befintliga databas inte kan hålla jämna ste När det gäller att konfigurera anslutningar, börja med max_connections i postgresql.conf som är inställt på 400 (eller kanske till och med 200) och titta på Prometheus-metrarna store_connection_wait_time_ms och store_connection_checkout_count. Märkbara väntetider (något över 5 ms) är en indikation på att det finns för få anslutningar tillgängliga; höga väntetider beror också på att databasen är mycket upptagen (som hög CPU-belastning). Om databasen verkar annars stabil, indikerar höga väntetider att antalet anslutningar behöver ökas. I konfigurationen är det en övre gräns för hur många anslutningar varje graph-node-instans kan använda, och Graph Node kommer inte att hålla anslutningar öppna om det inte behöver dem. -Läs mer om konfiguration av lagring [här](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### Intag av dedikerade block -Om det finns flera konfigurerade noder är det nödvändigt att specificera en nod som är ansvarig för inhämtning av nya block, så att alla konfigurerade indexnoder inte frågar huvudet av kedjan. 
Detta görs som en del av namnrymden `chains`, där du anger `node_id` som ska användas för blockinhämtning: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### Stöd för flera nätverk -Graf Protocol ökar antalet nätverk som stöds för indexering av belöningar, och det finns många undergrafer som indexerar icke-stödda nätverk som en indexerare skulle vilja bearbeta. Filen `config.toml` möjliggör uttrycksfull och flexibel konfiguration av: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Flera nätverk - Flera leverantörer per nätverk (detta kan göra det möjligt att dela upp belastningen mellan leverantörer, och kan också möjliggöra konfiguration av fullständiga noder samt arkivnoder, där Graph Node föredrar billigare leverantörer om en viss arbetsbelastning tillåter det). - Ytterligare information om leverantören, t. ex. funktioner, autentisering och typ av leverantör (för stöd för experimentell Firehose) -Avsnittet `[chains]` styr de ethereum-providers som graph-node ansluter till, och var block och andra metadata för varje kedja lagras. Följande exempel konfigurerar två kedjor, mainnet och kovan, där block för mainnet lagras i vip-sharden och block för kovan lagras i den primära sharden. Mainnet-kedjan kan använda två olika leverantörer, medan kovan bara har en leverantör. +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Läs mer om leverantörsconfiguration [här](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Miljö variabler -Graph Node stöder ett utbud av miljövariabler som kan aktivera funktioner eller ändra Graph Node-beteendet. Dessa är dokumenterade [här](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### Kontinuerlig driftsättning Användare som driver en skalad indexering med avancerad konfiguration kan dra nytta av att hantera sina Graph Nodes med Kubernetes. 
-- Indexeringsförrådet har en [exempel på Kubernetes-referens](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) är en verktygslåda för att köra en Graph Protocol Indexet på Kubernetes som underhålls av GraphOps. Den tillhandahåller en uppsättning Hjelm-diagram och en CLI för att hantera en grafnod-distribution. +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### Hantera Graf Noder @@ -229,23 +229,23 @@ Med en körande Graph Node (eller Graph Nodes!) är utmaningen sedan att hantera #### Loggning -Graph Nodes loggar kan ge användbar information för felsökning och optimering av Graph Node och specifika subgrafer. Graph Node stöder olika loggnivåer via miljövariabeln `GRAPH_LOG`, med följande nivåer: error, warn, info, debug eller trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -Dessutom ger inställningen `GRAPH_LOG_QUERY_TIMING` till `gql` mer information om hur GraphQL-frågor körs (dock kommer detta att generera en stor mängd loggar). +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### Övervakning & varning +#### Monitoring & alerting Graph Node tillhandahåller metrikerna via Prometheus-endpunkt på port 8040 som standard. Grafana kan sedan användas för att visualisera dessa metriker. -Indexer-repositoriet tillhandahåller en [exempel Grafana-konfiguration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman` är ett underhållsverktyg för Graph Node som hjälper till med diagnos och lösning av olika dagliga och exceptionella uppgifter. +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -Kommandot graphman ingår i de officiella containrarna, och du kan köra det med docker exen in i din graph-node-container. Det kräver en `config.toml`-fil. +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Fullständig dokumentation om `graphman`-kommandon finns i Graph Node-repositoriet. Se \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) i Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. 
See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Arbeta med undergrafer @@ -253,7 +253,7 @@ Fullständig dokumentation om `graphman`-kommandon finns i Graph Node-repositori Tillgänglig som standard på port 8030/graphql, exponerar indexeringstatus-API: en en rad metoder för att kontrollera indexeringstatus för olika subgrafer, kontrollera bevis för indexering, inspektera subgrafegenskaper och mer. -Hela schemat är tillgängligt [här](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Prestanda för indexering @@ -267,8 +267,8 @@ Dessa stadier är pipelinerade (det vill säga de kan utföras parallellt), men Vanliga orsaker till indexeringslångsamhet: -- Tidsåtgång för att hitta relevanta händelser från kedjan (särskilt anropshanterare kan vara långsamma, eftersom de förlitar sig på `trace_filter`) -- Göra ett stort antal `eth_calls` som en del av handläggare +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers - En stor mängd butiksinteraktion under exekvering - En stor mängd data att spara i butiken - Ett stort antal evenemang att bearbeta @@ -287,19 +287,19 @@ Under indexering kan subgrafer misslyckas om de stöter på data som är ovänta I vissa fall kan ett misslyckande vara lösbart av indexören (till exempel om felet beror på att det inte finns rätt typ av leverantör, kommer att tillåta indexering att fortsätta om den nödvändiga leverantören läggs till). Men i andra fall krävs en ändring i subgrafkoden. -> Deterministiska misslyckanden betraktas som "slutliga", med en Proof of Indexing genererad för det misslyckande blocket, medan icke-deterministiska misslyckanden inte är det, eftersom subgrafen kanske lyckas "avmisslyckas" och fortsätta indexeringen. I vissa fall är den icke-deterministiska etiketten felaktig, och subgrafen kommer aldrig att övervinna felet; sådana misslyckanden bör rapporteras som problem i Graf Node-repositoriet. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Blockera och anropa cache -Graf Node cachar viss data i lagringen för att undvika att hämtas från leverantören. Block cachas, liksom resultaten av `eth_calls` (det senare cachas från en specifik block). Denna cachning kan dramatiskt öka indexeringens hastighet under "omjustering" av en något ändrad subgraf. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. -Men i vissa fall, om en Ethereum-nod har tillhandahållit felaktig data under en period, kan det ta sig in i cachen, vilket leder till felaktig data eller misslyckade subgrafer. 
I det här fallet kan indexerare använda `graphman` för att rensa den förgiftade cachen och sedan spola tillbaka de påverkade subgraferna, som sedan hämtar färsk data från den (förhoppningsvis) friska leverantören. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Om en blockcache-inkonsekvens misstänks, som att en tx-kvitto saknar händelse: -1. `graphman chain list` för att hitta kedjans namn. -2. `graphman chain check-blocks by-number ` kontrollerar om det cachade blocket matchar leverantören, och tar bort blocket från cacheminnet om det inte gör det. - 1. Om det finns en skillnad kan det vara säkrare att trunkera hela cacheminnet med `graphman chain truncate `. +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. Om blocket matchar leverantören kan problemet felsökas direkt mot leverantören. #### Fråga frågor och fel @@ -312,7 +312,7 @@ Det finns inte en "silverkula", men en rad verktyg för att förebygga, diagnost ##### Fråge cachning -Graf Node cachar GraphQL-frågor som standard, vilket kan minska belastningen på databasen avsevärt. Detta kan konfigureras ytterligare med inställningarna `GRAPH_QUERY_CACHE_BLOCKS` och `GRAPH_QUERY_CACHE_MAX_MEM` - läs mer [här](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### Analyserar frågor @@ -320,7 +320,7 @@ Problematiska frågor dyker oftast upp på ett av två sätt. I vissa fall rappo I andra fall kan utlösaren vara hög minnesanvändning på en frågenod, i vilket fall utmaningen först är att identifiera frågan som orsakar problemet. -Indexörer kan använda [qlog](https://github.com/graphprotocol/qlog/) för att bearbeta och sammanfatta Graph Nodes frågeloggar. `GRAPH_LOG_QUERY_TIMING` kan också aktiveras för att hjälpa till att identifiera och felsöka långsamma frågor. +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. Med en långsam fråga har indexörer några alternativ. Självklart kan de ändra sin kostnadsmodell för att kraftigt öka kostnaden för att skicka den problematiska frågan. Detta kan resultera i att frekvensen av den frågan minskar. Men det löser ofta inte grunden till problemet. @@ -328,18 +328,18 @@ Med en långsam fråga har indexörer några alternativ. Självklart kan de änd Databastabeller som lagrar enheter verkar generellt komma i två varianter: 'transaktionsliknande', där enheter, när de väl är skapade, aldrig uppdateras, dvs. de lagrar något liknande en lista över finansiella transaktioner, och 'konto-liknande', där enheter uppdateras mycket ofta, dvs. 
de lagrar något som finansiella konton som ändras varje gång en transaktion registreras. Tabeller med konto-liknande tabeller karakteriseras av att de innehåller ett stort antal enhetsversioner, men relativt få distinkta enheter. Ofta är antalet distinkta enheter i sådana tabeller 1% av det totala antalet rader (enhetsversioner)

-För konto-liknande tabeller kan `graph-node` generera frågor som utnyttjar detaljer om hur Postgres slutligen lagrar data med en så hög förändringsfrekvens, nämligen att alla versioner för nyligen block är i en liten del av den övergripande lagringen för en sådan tabell.
+For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table.

-Kommandot `graphman stats show <table>` visar, för varje enhetstyp/tabell i en deployment, hur många distinkta enheter och hur många enhetsversioner varje tabell innehåller. Den data är baserad på Postgres-interna uppskattningar och är därför nödvändigtvis oprecis och kan vara fel med en ordning av storlek. Ett `-1` i kolumnen `entities` innebär att Postgres tror att alla rader innehåller en distinkt enhet.
+The command `graphman stats show <table>` shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity.

-I allmänhet är tabeller där antalet distinkta enheter är mindre än 1% av det totala antalet rader/enhetsversioner bra kandidater för konto-liknande optimering. När utdata från `graphman stats show` indikerar att en tabell kan dra nytta av denna optimering, kommer att köra `graphman stats show <table>` att utföra en full räkning av tabellen - det kan vara långsamt, men ger en precis mätning av förhållandet mellan distinkta enheter till totalt enhetsversioner.
+In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show <table>` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions.

-När en tabell har fastställts som konto-liknande, kommer att köra `graphman stats account-like <sgdNNN>.<table>` att aktivera konto-liknande optimeringen för frågor mot den tabellen. Optimeringen kan stängas av igen med `graphman stats account-like --clear <sgdNNN>.<table>` Det tar upp till 5 minuter för frågenoder att märka att optimeringen har aktiverats eller stängts av. Efter att ha aktiverat optimeringen är det nödvändigt att verifiera att ändringen faktiskt inte gör att frågor blir långsammare för den tabellen. Om du har konfigurerat Grafana för att övervaka Postgres, skulle långsamma frågor dyka upp i `pg_stat_activity` i stora mängder, ta flera sekunder. I det fallet måste optimeringen stängas av igen.
+Once a table has been determined to be account-like, running `graphman stats account-like <sgdNNN>.<table>

-För subgrafer som liknar Uniswap är `pair` och `token` tabeller primära kandidater för denna optimering och kan ha en dramatisk effekt på databasbelastningen.
+For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load.

#### Ta bort undergrafer

> Detta är ny funktionalitet, som kommer att vara tillgänglig i Graf Node 0.29.x

-Vid någon tidpunkt kan en indexer vilja ta bort en given subgraf. Detta kan enkelt göras via `graphman drop`, som raderar en distribution och all dess indexerade data. Distributionen kan specificeras antingen som ett subgrafnamn, en IPFS-hash `Qm..`, Eller databasens namnrymd `sgdNNN`. Ytterligare dokumentation finns tillgänglig [här](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop).
+At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all its indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop).

From d18f53ea6659f52fe3718f38ddb7b61fd2f8642b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:36 -0500
Subject: [PATCH 0125/1534] New translations graph-node.mdx (Turkish)

---
 .../pages/tr/indexing/tooling/graph-node.mdx  | 120 +++++++++---------
 1 file changed, 60 insertions(+), 60 deletions(-)

diff --git a/website/src/pages/tr/indexing/tooling/graph-node.mdx b/website/src/pages/tr/indexing/tooling/graph-node.mdx
index 7a9039808b81..414bfea3161a 100644
--- a/website/src/pages/tr/indexing/tooling/graph-node.mdx
+++ b/website/src/pages/tr/indexing/tooling/graph-node.mdx
@@ -1,16 +1,16 @@
---
-title: Graph Düğümü İşletme
+title: Graph Node
---

Graph Düğümü, subgraphları indeksleyen ve sonuçta oluşan verileri GraphQL API aracılığıyla sorgulanabilir hale getiren bileşendir. Bu nedenle indeksleyici yığınının merkezi bir parçasıdır ve başarılı bir indeksleyici çalıştırmak için Graph Düğümü'nün doğru şekilde çalışması çok önemlidir.

-Bu, Graph Düğümü hakkında bağlamsal bir genel bakış ve indeksleyiciler için mevcut olan daha gelişmiş seçenekler hakkında bilgi sağlar. Ayrıntılı belgeler ve talimatlar [Graph Düğümü Github deposunda](https://github.com/graphprotocol/graph-node) bulunabilir.
+This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node).

## Graph Node

-[Graph Düğümü](https://github.com/graphprotocol/graph-node),Subgraph'ları Graph Ağı üzerinde endeksleme, blok zinciri istemcilerine bağlanma ve indekslenen verileri sorgulanabilir hale getirimek için referans uygulamasıdır.
+[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -Graph Düğümü(ve tüm endeksleyici yığını), bare metal veya bir bulut ortamında çalıştırılabilir. Bu merkezi indeksleme bileşeninin esnekliği, Graph Protokolü'nün dayanıklılığı için önemlidir. Benzer şekilde, Graph Düğümü [kaynaktan oluşturulabilir](https://github.com/graphprotocol/graph-node) veya indeksleyiciler [sağlanan Docker Görüntülerinden](https://hub.docker.com/r/graphprotocol/graph-node) birini kullanabilir. +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL veritabanı @@ -32,9 +32,9 @@ Subgraph dağıtım üst verilerini IPFS ağında depolanır. Graph düğümü, İzleme ve raporlama etkinleştirmek için Graph Düğümü, metrikleri bir Prometheus metrik sunucusuna opsiyonel olarak kaydedebilir. -### Kaynaktan başlama +### Getting started from source -#### Önkoşulları yükleyin +#### Install prerequisites - **Rust** @@ -42,15 +42,15 @@ Subgraph dağıtım üst verilerini IPFS ağında depolanır. Graph düğümü, - **IPFS** -- **Ubuntu kullanıcıları için Ek Gereksinimler** - Ubuntu üzerinde bir Graph Düğümü çalıştırmak için birkaç ek paket gerekebilir. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config ``` -#### Kurulum +#### Setup -1. Bir PostgreSQL veritabanı sunucusu başlatma +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. [Graph Düğümü](https://github.com/graphprotocol/graph-node) github deposunu klonlayın ve `cargo build` çalıştırarak kaynağı derleyin +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. Artık tüm bağımlılıklar ayarlandığına göre Graph Düğümü'nü başlatın: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,35 +71,35 @@ cargo run -p graph-node --release -- \ ### Kubernetes'i kullanmaya başlarken -Tam Kubernetes örnek yapılandırması [indeksleyici Github deposunda](https://github.com/graphprotocol/indexer/tree/main/k8s) bulunabilir. +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Portlar Graph Düğümü çalışırken aşağıdaki portları açar: -| Port | Amaç | Rotalar | CLI Argümanı | Ortam Değişkeni | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP sunucusu
    ( subgraph sorguları için) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    ( subgraph abonelikleri için) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (dağıtımları yönetmek için) | / | --admin-port | - |
-| 8030 | Subgraph indeksleme durum API'si | /graphql | --index-node-port | - |
-| 8040 | Prometheus metrikleri | /metrics | --metrics-port | - |

+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC<br />(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

-> **Önemli**: Bağlantı noktalarını herkese açık olarak açarken dikkatli olun - **yönetim portları** kilitli tutulmalıdır. Bu, Graph Düğümü JSON-RPC uç noktasını içerir.
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint.

## Gelişmiş Graph Düğüm yapılandırması

En basit haliyle, Graph Düğümü tek bir Graph Düğüm örneği, bir PostgreSQL veritabanı, bir IPFS düğümü ve indekslenecek subgraphlar tarafından gerektirilen ağ istemcileri ile çalıştırılabilir.

-Bu yapı birden fazla Graph Düğümü ekleyerek ve bu Graph Düğümlerini desteklemek için birden fazla veritabanı ekleyerek yatay olarak ölçeklenebilir. Gelişmiş kullanıcılar, `config.toml` dosyası ve Graph Düğümü ortam değişkenleri aracılığıyla bazı yatay ölçekleme yeteneklerinden ve daha gelişmiş yapılandırma seçeneklerinden faydalanmak isteyebilirler.
+This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables.

### `config.toml`

-[TOML](https://toml.io/en/) yapılandırma dosyası, CLI'de sunulanlardan daha karmaşık yapılandırmaları ayarlamak için kullanılabilir. Dosyanın konumu --config komut satırı anahtar kelimesiyle iletilir.
+A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch.

> Yapılandırma dosyası kullanırken --postgres-url, --postgres-secondary-hosts ve --postgres-host-weights seçeneklerinin kullanılması mümkün değildir.

-Asgari bir `config.toml` dosyası sağlanabilir. Aşağıdaki dosya, --postgres-url komut satırı seçeneği kullanmakla eşdeğerdir:
+A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option:

```toml
[store]
[store.primary]
connection="<.. postgres-url argument ..>"
[deployment]
[deployment.rule]
shard = "primary"
indexers = [ "<.. list of all indexing nodes ..>" ]
```

-`config.toml`'nin tam dökümantasyonu, [Graph Düğümü belgelerinde](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md) bulunabilir.
+Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md).

#### Birden Fazla Graph Düğümü

Graph Node indexing can scale horizontally, running multiple instances of Graph

#### Dağıtım kuralları

-Birden fazla Graph Düğümü verildiğinde, aynı subgraph'ın çarpışmalara yol açacak şekilde iki farklı düğüm tarafından indekslenmesinin önüne geçmek için yeni subgraphlar'ın dağıtımını yönetmek gereklidir. Bu, veritabanı sharding kullanılıyorsa bir subgraph'ın verilerinin hangi `shard`'da saklanması gerektiğini de belirtebilen dağıtım kurallarını kullanılarak yapılabilir. Dağıtım kuralları, karar vermek için subgraph adı ve dağıtımın indekslediği ağ ile eşleşebilir.
+Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions.
This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. Örnek dağıtım kuralı yapılandırması: @@ -150,7 +150,7 @@ indexers = [ ] ``` -Dağıtım kuralları hakkında daha fazlasını [buradan](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment) okuyun. +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). #### Özelleştirilmiş sorgu düğümleri @@ -161,13 +161,13 @@ Düğümler, yapılandırma dosyasına aşağıdakini dahil ederek açıkça sor query = "" ``` ---node-id'si düzenli ifade ile eşleşen herhangi bir düğüm, sadece sorgulara yanıt vermek üzere ayarlanacaktır. +\--node-id'si düzenli ifade ile eşleşen herhangi bir düğüm, sadece sorgulara yanıt vermek üzere ayarlanacaktır. #### Sharding ile veritabanı ölçeklendirme Çoğu kullanım durumu için, tek bir Postgres veritabanı bir graph-düğümü örneğini desteklemek için yeterlidir. Bir graph-düğümü örneği tek bir Postgres veritabanından daha büyük hale geldiğinde, bu graph düğümü verilerinin depolanmasını birden fazla Postgres veritabanına yaymak mümkündür. Tüm veritabanları birlikte, graph-düğümü örneğinin deposunu oluşturur. Her tekil veritabanına bir shard denir. -Shard'lar, subgraph dağıtımlarını birden çok veritabanına bölmek için kullanılabilir ve sorgu yükünü veritabanları arasında yaymak için replikaların kullanılmasına da izin verilebilir. Bu, her `graph-düğümü`'nün her veritabanı için bağlantı havuzunda ne kadar mevcut veritabanı bağlantısı olduğunu yapılandırmayı içerir ve daha fazla subgraph'ın indekslendiği durumlarda önem kazanır. +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. Sharding, Graph Düğümü'nün üzerine koyduğu yükü mevcut veritabanınıza koyamadığınızda ve veritabanı boyutunu artıramayacağınızda faydalı hale gelir. @@ -175,11 +175,11 @@ Sharding, Graph Düğümü'nün üzerine koyduğu yükü mevcut veritabanınıza Bağlantı yapılandırması açısından postgresql.conf'da max_connections değerinin 400 (veya belki de 200) olarak ayarlanması ve store_connection_wait_time_ms ve store_connection_checkout_count Prometheus metriklerine bakılması önerilir. Belirgin bekleme süreleri (5 milisaniye'nin üzerinde herhangi bir değer) yetersiz bağlantıların mevcut olduğunun bir işaretidir; yüksek bekleme süreleri veritabanının çok yoğun olması gibi sebeplerden de kaynaklanabilir. Ancak, veritabanı genel olarak stabil görünüyorsa, yüksek bekleme süreleri bağlantı sayısını arttırma ihtiyacını belirtir. Yapılandırmada her graph-düğümü örneğinin ne kadar bağlantı kullanabileceği bir üst sınırdır ve Graph Düğümü bunları gereksiz bulmadığı sürece açık tutmaz. -Depolama yapılandırması hakkında daha fazla bilgi için [burayı](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases) okuyabilirsiniz. +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). 
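
As a rough illustration of the pool-sizing advice above, the two connection-pool metrics mentioned here can be pulled straight from Graph Node's Prometheus endpoint. This sketch assumes the default metrics port 8040 from the ports table; exact metric labels may vary by version:

```sh
# Sketch: spot-check connection-pool pressure on a running Graph Node.
# Sustained wait times above ~5ms suggest too few available connections.
curl -s http://localhost:8040/metrics | grep store_connection_wait_time_ms
curl -s http://localhost:8040/metrics | grep store_connection_checkout_count
```
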
#### Özelleştirilmiş blok alınması -Birden fazla düğüm yapılandırılmışsa yeni blokları işleme sorumluluğu olan bir düğüm belirtmek gerekecektir, böylece yapılandırılmış tüm dizin düğümleri zincir başını sorgulamaz. Bu, zincir (`chains`) ad alanının bir parçası olarak yapılır ve blok yüklemek için kullanılacak düğüm kimliği(`node_id`) belirtilir: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### Birden fazla ağın desteklenmesi -Graph Protokolü, indeksleme ödülleri için desteklenen ağların sayısını arttırıyor ve bir indekleyicinin işlemek isteyebileceği desteklenmeyen ağları indeksleyen birçok subgraph mevcut. c`config.toml` dosyası şunlar gibi anlamlı ve esnek yapılandırmaları destekler: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - Birden fazla ağ - Ağ başına birden fazla sağlayıcı (bu, yükü sağlayıcılar arasında bölme ve bir Graph Düğümü'nün deneyimsel Firehose desteği gibi daha ucuz sağlayıcıları tercih etmesi ile tam düğümlerin yanı sıra arşiv düğümlerinin yapılandırılmasına da izin verebilir). - Özellikler, kimlik doğrulama ve sağlayıcı türü gibi ek sağlayıcı detayları (deneysel Firehose desteği için) -`[chains]` bölümü, graph-düğümü'nün bağlandığı ethereum sağlayıcılarını ve her zincir için blokların ve diğer üst verilerin nerede depolandığını kontrol eder. Aşağıdaki örnek, mainnet için blokların vip shard'da depolandığı ve kovan için blokların primary shard'da depolandığı olmak üzere iki zinciri, mainnet ve kovan'ı yapılandırır. Mainnet zinciri iki farklı sağlayıcı kullanabilirken, kovan yalnızca bir sağlayıcıya sahiptir. +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Sağlayıcı yapılandırması hakkında daha fazla bilgi için [burayı](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers) okuyabilirsiniz. +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### Ortam değişkenleri -Graph Düğümü, özellikleri etkinleştirebilen veya Graph Düğümü davranışını değiştirebilen bir dizi çevre değişkeni destekler. Bunlar [burada](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) belgelenmiştir. +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). 
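
As a minimal sketch, two of the environment variables used elsewhere in this document can be set before starting the node; the values shown are illustrative, not recommendations:

```sh
# Sketch: raise log verbosity and enable per-query GraphQL timing logs,
# then start graph-node with a config file (flags as documented above).
export GRAPH_LOG=debug
export GRAPH_LOG_QUERY_TIMING=gql
cargo run -p graph-node --release -- \
  --config config.toml \
  --ipfs 127.0.0.1:5001
```
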
### Sürekli dağıtım Gelişmiş yapılandırmaya sahip ölçeklendirilmiş bir dizinleme kurulumu işleten kullanıcılar, Graph Düğümler'ini Kubernetes ile yönetmekten faydalanabilirler. -- İndeksleyici github deposunda bir [Kubernetes referansı örneği](https://github.com/graphprotocol/indexer/tree/main/k8s) bulunmaktadır -- [Launchpad](https://docs.graphops.xyz/launchpad/intro), GraphOps tarafından yönetilen Kubernetes üzerinde Graph Protokol indeksleyicisi çalıştırmak için kullanılan bir araç setidir. Graph Düğümü dağıtımını yönetmek için bir dizi Helm şeması ve bir CLI sağlar. +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### Graph Düğümü Yönetimi @@ -229,23 +229,23 @@ Gelişmiş yapılandırmaya sahip ölçeklendirilmiş bir dizinleme kurulumu iş #### Kayıt tutma -Graph Düğümü'nün kayıtları, Graph Düğümü ve belirli subgraphlar'ın hata ayıklanması ve optimizasyonu için faydalı bilgiler sağlayabilir. Graph Düğümü, `GRAPH_LOG` ortam değişkeni aracılığıyla farklı kayıt seviyelerini destekler ve şu seviyeleri içerir: error, warn, info, debug veya trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -Ek olarak, `GRAPH_LOG_QUERY_TIMING` `gql` olarak ayarlanması GraphQL sorgularının nasıl çalıştığı hakkında daha fazla ayrıntı sağlar (ancak bu, büyük bir kayıt hacmi oluşturacaktır). +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### Görüntüleme & uyarma +#### Monitoring & alerting Graph Düğümü, varsayılan olarak 8040 port'undaki Prometheus uç noktası aracılığıyla metrikleri sağlar. Ardından Grafana, bu metrikleri görselleştirmek için kullanılabilir. -İndeksleyici github deposu [Grafana yapılandırmasına bir örnek](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml) sağlar. +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman`, Graph Düğümü'nün bakım aracıdır ve farklı günlük görevlerinin teşhis ve çözümüne yardımcı olur. +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -Graphman komutu, resmi konteynerlara dahil edilmiştir ve graph-düğümü konteynerınıza docker exec ile girerek çalıştırabilirsiniz. Bu bir `config.toml` dosyasına ihtiyaç duyar. +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -`graphman` komutlarının tam belgeleri Graph Düğümü github deposunda mevcuttur. Graph Düğümü `/docs`'da bulunan \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) bağlantısına bakın +Full documentation of `graphman` commands is available in the Graph Node repository. 
See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Subgraphlarla çalışma @@ -253,7 +253,7 @@ Graphman komutu, resmi konteynerlara dahil edilmiştir ve graph-düğümü konte Varsayılan olarak 8030/graphql port'unda mevcut olan indeksleme durumu API'si, farklı subgraphlar için indeksleme durumunu ve ispatlarını kontrol etmek, subgraph özelliklerini incelemek ve daha fazlasını yapmak için çeşitli yöntemler sunar. -Tam şema [burada](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) mevcut. +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). #### Endeksleme performansı @@ -267,8 +267,8 @@ Bu aşamalar boru hattında (yani eşzamanlı olarak yürütülebilir), ancak bi İndeksleme yavaşlığının yaygın nedenleri: -- Zincirdeki ilgili olayları bulmak için geçen süre (özellikle çağrı yönlendiricileri, `trace_filter`'a bağımlı oldukları için yavaş olabilir) -- İşleyicilerin bir parçası olarak çok fazla `eth_calls` yapmak +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers - Yürütme sırasında büyük miktarda depolama etkileşimi - Depoya kaydedilecek büyük miktarda veri - İşlenecek büyük miktarda olay @@ -287,19 +287,19 @@ Subgraph indeksleme metrikleri, indeksleme yavaşlığının temel nedenini teş Bazı durumlarda, başarısızlık indeksleyici tarafından çözülebilir (örneğin, hatanın doğru türde sağlayıcıya sahip olmamasından kaynaklanması durumunda, gerekli sağlayıcı eklenirse indeksleme devam ettirilebilir). Ancak diğer durumlarda, subgraph kodunda bir değişiklik gereklidir. -> Belirleyici başarısızlıklar, başarısız blok için oluşturulan İndeksleme Kanıtı ile "final" olarak kabul edilirken, deterministik olmayan başarısızlıklar subgraph'ın "unfail"i idare edip indekslememye devam edebileceğinden "final" olarak kabul edilmez. Bazı durumlarda, deterministik olmayan etiketi yanlış olabilir ve subgraph hatayı asla aşamayabilir. Bu tür başarısızlıklar, Graph Düğümü github deposunda bir sorun olarak bildirilmelidir. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Blok ve çağrı önbelleği -Graph Düğümü, sağlayıcıdan tekrar alma işlemini kaydetmek için depoda belirli verileri önbelleğe alır. Bloklar ve `eth_calls` sonuçları önbelleğe alınır (bu sonuncusu belirli bir bloktan itibaren önbelleğe alınır). Bu önbellekleme, azca değiştirilmiş bir subgraph'ın "yeniden senkronizasyonu" sırasında indeksleme hızını büyük ölçüde artırabilir. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. -Ancak bazı örneklerde, Ethereum düğümü bir süre boyunca yanlış veri sağlamışsa, bu önbelleğe girebilir ve yanlış verilere veya subgraphlar'ın başarısız olmasına neden olabilir. 
Bu durumda, indeksleyiciler zehirlenmiş önbelleği temizlemek için `graphman` kullanabilir ve etkilenen subgraphlar'ı geri sarabilir, böylece (umarız) sağlıklı sağlayıcıdan temiz verileri alabilirler. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. Örneğin tx makbuzu etkinlik eksikliği gibi bir blok önbellek tutarsızlığı şüphesi varsa: -1. zincir ismini bulmak için `graphman chain list`. -2. `graphman chain check-blocks by-number `, önbelleğe alınan bloğun sağlayıcıyla eşleşip eşleşmediğini kontrol edecek ve eşleşmiyorsa bloğu önbellekten silecek. - 1. Bir fark varsa, tüm önbelleği `graphman chain truncate ` ile kesmek daha güvenli olabilir. +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. Blok sağlayıcıyla eşleşirse, sorun doğrudan sağlayıcıya karşı hata ayıklanabilir. #### Sorgulama sorunları ve hataları @@ -312,7 +312,7 @@ Tek bir "sihirli çözüm" yoktur, ancak yavaş sorguların önlenmesi, teşhisi ##### Sorgu önbellekleme -Graph Düğümü, varsayılan olarak GraphQL sorgularını önbelleğe alarak veritabanı yükünü önemli ölçüde azaltabilir. Bu, `GRAPH_QUERY_CACHE_BLOCKS` ve `GRAPH_QUERY_CACHE_MAX_MEM` ayarları ile daha da yapılandırılabilir - [buradan](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching) daha fazla bilgi edinin. +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### Sorguların analizi @@ -320,7 +320,7 @@ Sorunlu sorgular genellikle iki şekilde ortaya çıkar. Bazı durumlarda, kulla Diğer durumlarda, tetikleyici sorgu düğümündee yüksek bellek kullanımı olabilir, bu durumda zorluk ilk olarak soruna neden olan sorguyu belirlemektir. -İndeksleyiciler [qlog](https://github.com/graphprotocol/qlog/) kullanarak Graph Düğümü'nün sorgu kayıtlarını işleyebilir ve özetleyebilir. Ayrıca `GRAPH_LOG_QUERY_TIMING` yavaş sorguların tanımlamak ve ayıklamak için etkinleştirilebilir. +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. Yavaş bir sorgu verildiğinde, indeksleyicilerin birkaç seçeneği vardır. Tabii ki, sorunlu sorgunun gönderilme maliyetini önemli ölçüde artırmak için maliyet modelini değiştirebilirler. Bu, o sorgunun sıklığında azalmaya neden olabilir. Ancak, genellikle sorunun temek nedenini çözmez. @@ -328,18 +328,18 @@ Yavaş bir sorgu verildiğinde, indeksleyicilerin birkaç seçeneği vardır. 
Ta Varlıkları depolayan veritabanı tablolarının genellikle iki çeşit olduğu görünmektedir: oluşturulduktan sonra hiçbir zaman güncellenmeyen mesela finansal işlemler listesine benzer şeyler saklayan olan 'işlemimsi' ve varlıkların çok sık güncellendiği, mesela her işlem kaydedildiğinde değiştirilen finansal hesaplar gibi şeyler saklayan 'hesabımsı'. Hesabımsı tablolar, birçok varlık sürümünü içermelerine rağmen, nispeten az sayıda farklı varlığa sahip olmasıyla bilinir. Çoğu durumda, böyle bir tabloda farklı varlık sayısı, toplam satır (varlık sürümleri) sayısının %1'ine eşittir -Hesabımsı tablolar için, `graph-node`, Postgres'in verileri nasıl bu kadar yüksek bir değişim oranıyla depolamaya başladığına dair ayrıntılardan yararlanan sorgular oluşturabilir, yani böyle bir tablo için son blokların tüm sürümleri genel depolamanın küçük bir alt bölümünde yer alır. +For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. -`graphman stats show komutu, bir dağıtımdaki her varlık türü/tablosu için kaç farklı varlık ve her tablonun kaç varlık sürümü içerdiğini gösterir. Bu veriler Postgres dahili tahminlerine dayanır ve bu nedenle doğruluğu kesin olmayabilir ve bir büyüklük sırasına nazaran yanıltıcı olabilir. `entities` sütununda `-1`, Postgres'un tüm satırların farklı bir varlık içerdiğine inandığını gösterir. +The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. -Genel olarak, farklı varlıkların sayısı toplam satır/varlık sürümü sayısının %1'inden az olan tablolar, hesap-tablo optimizasyonu için iyi adaylardır. `graphman stats show` çıktısı, bir tablonun bu optimizasyondan faydalanabileceğini gösteriyorsa, `graphman stats show
<table>`'ı çalıştırmak, tablonun tam sayımını gerçekleştirir. - bu yavaş olabilir, ancak farklı varlıkların toplam varlık sürümlerine oranı kesin bir ölçüdür.
+In general, tables where the number of distinct entities is less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show <table>` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions.

-Bir tablonun hesabımsı olduğu belirlendikten sonra `graphman stats account-like <sgdNNN>.<table>`'ı çalıştırmak, bu tabloya karşı sorgular için hesabımsı optimizasyonu etkinleştirecektir. Optimizasyon, `graphman stats account-like --clear <sgdNNN>.<table>` ile tekrar kapatılabilir. Optimizasyonun etkinleştirildiğinin veya kapatıldığının sorgu düğümleri tarafından fark edilmesi için en fazla 5 dakika beklemek gereklidir. Optimizasyonu açtıktan sonra, değişikliğin söz konusu tablo için sorguları daha yavaş hale getirmediğinden emin olmak için doğrulama yapılması gerekir. Postgres'i izlemek için Grafana'yı yapılandırdıysanız, yavaş sorgular `pg_stat_activity` bölümünde büyük sayılarda ve birkaç saniye süren işlemler şeklinde görünecektir. Bu durumda, optimizasyon tekrar kapatılmalıdır.
+Once a table has been determined to be account-like, running `graphman stats account-like <sgdNNN>.<table>` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>
    ` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -Uniswapımsı subgraplar için, çift (`pair`) ve `token` tabloları bu optimizasyon için en uygun adaylardır ve veritabanı yükü üzerinde etkili bir etkiye sahip olabilirler. +For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. #### Subgraphları kaldırma > Bu, Graph Node 0.29.x sürümünde kullanılabilir olan yeni bir fonksiyonelliktir -Bir noktada indeksleyici, belirli bir subgraph'ı kaldırmak isteyebilir. Bu, tüm indekslenmiş verileri ve bir dağıtımı silen `graphman drop` komutuyla kolayca gerçekleştirilebilir. Dağıtım, subgraph adı, bir IPFS hash `Qm..` veya veritabanı ad alanı `sgdNNN` olarak belirtilebilir. Daha fazla belgeye [buradan](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) erişilebilir. +At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From 8943101f4e0251e371c024d3b1336f4c5374dd49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:37 -0500 Subject: [PATCH 0126/1534] New translations graph-node.mdx (Ukrainian) --- .../pages/uk/indexing/tooling/graph-node.mdx | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/website/src/pages/uk/indexing/tooling/graph-node.mdx b/website/src/pages/uk/indexing/tooling/graph-node.mdx index 30a9ee532653..6a27301b680b 100644 --- a/website/src/pages/uk/indexing/tooling/graph-node.mdx +++ b/website/src/pages/uk/indexing/tooling/graph-node.mdx @@ -1,5 +1,5 @@ --- -title: Operating Graph Node +title: Graph Node --- Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -32,9 +32,9 @@ Subgraph deployment metadata is stored on the IPFS network. The Graph Node prima To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. -### Початок роботи з базового коду +### Getting started from source -#### Встановіть необхідні умови +#### Install prerequisites - **Rust** @@ -42,15 +42,15 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Додаткові вимоги для користувачів Ubuntu** - Для запуску Graph Node на Ubuntu може знадобитися декілька додаткових програм. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config ``` -#### Налаштування +#### Setup -1. Запуск сервера бази даних PostgreSQL +1. 
Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Клонуйте [Graph Node](https://github.com/graphprotocol/graph-node) репозиторій і створіть базовий код, запустивши `cargo build` +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. Тепер, коли всі необхідні складові налаштовано, запустіть Graph Node: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Порт | Призначення | Розташування | Аргумент CLI | Перемінна оточення | -| --- | --- | --- | --- | --- | -| 8000 | HTTP-сервер GraphQL
    (для запитів до підграфів) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-порт | - | -| 8001 | GraphQL WS
    (для підписок на підграфи) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (для керування розгортаннями) | / | --admin-port | - | -| 8030 | API стану індексації підграфів | /graphql | --index-node-port | - | -| 8040 | Метрики Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. @@ -245,7 +245,7 @@ The indexer repository provides an [example Grafana configuration](https://githu The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Working with subgraphs @@ -287,7 +287,7 @@ During indexing subgraphs might fail, if they encounter data that is unexpected, In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache From 22eb8679a98bc22a11679372d080f89a237c9b55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:39 -0500 Subject: [PATCH 0127/1534] New translations graph-node.mdx (Chinese Simplified) --- .../pages/zh/indexing/tooling/graph-node.mdx | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/website/src/pages/zh/indexing/tooling/graph-node.mdx b/website/src/pages/zh/indexing/tooling/graph-node.mdx index 0847fd1c03c6..afdb77db8f2d 100644 --- a/website/src/pages/zh/indexing/tooling/graph-node.mdx +++ b/website/src/pages/zh/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: 运营Graph节点 +title: Graph 节点 --- -Graph节点是索引子图的组件,并使生成的数据可通过GraphQL API进行查询。因此,它是索引人堆栈的中心,Graph节点的正确运作对于运行成功的索引人至关重要。 +Graph节点是索引子图的组件,并使生成的数据可通过GraphQL API进行查询。因此,它是索引器堆栈的中心,Graph节点的正确运作对于运行成功的索引器至关重要。 -这提供了Graph节点的背景概述,以及索引人可用的一些更高级的选项。可以在[Graph 节点存储库](https://github.com/graphprotocol/graph-node)中找到详细的文档和说明。 +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). 
## Graph 节点 -[Graph Node](https://github.com/graphprotocol/graph-node)是Graph Network上索引子图、连接到区块链客户端、索引子图并使索引数据可供查询的参考实现。 +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -Graph节点(以及整个索引人堆栈)可以在裸机上运行,也可以在云环境中运行。中央索引组件的这种灵活性对于图形协议的有力性至关重要。类似地,Graph 节点可以从[源代码构建](https://github.com/graphprotocol/graph-node),或者索引人可以使用[提供的Docker Images](https://hub.docker.com/r/graphprotocol/graph-node)之一。 +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL 数据库 @@ -20,7 +20,7 @@ Graph节点的主存储区,这是存储子图数据、子图元数据以及子 为了索引网络,Graph节点需要通过以太坊兼容的JSON-RPC访问网络客户端。此RPC可能连接到单个以太坊客户端,也可能是跨多个客户端进行负载平衡的更复杂的设置。 -虽然有些子图可能只需要一个完整的节点,但有些子图的索引功能可能需要额外的RPC功能。特别是,将`eth_calls`作为索引的一部分的子图需要一个支持[EIP-1898](https://eips.ethereum.org/EIPS/eip-1898)的归档节点,而带有`callHandlers`或带有`调用`筛选器的`blockHandlers`的子图则需要`trace_filter`支持[(请参阅此处的跟踪模块文档)](https://openethereum.github.io/JSONRPC-trace-module)。 +While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). **Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). @@ -32,25 +32,25 @@ Graph节点的主存储区,这是存储子图数据、子图元数据以及子 为了实现监控和报告,Graph节点可以选择将指标记录到Prometheus指标服务器。 -### 从来源开始 +### Getting started from source -#### 安装先决条件 +#### Install prerequisites -- **锈** +- **Rust** - **PostgreSQL** - **IPFS** -- **Ubuntu 用户的附加要求** - 要在 Ubuntu 上运行 Graph 节点,可能需要一些附加的软件包。 +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config ``` -#### 设置 +#### Setup -1. 启动 PostgreSQL 数据库服务器 +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. 克隆[Graph 节点](https://github.com/graphprotocol/graph-node)repo,并通过运行 `cargo build`来构建源代码。 +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. 现在,所有的依赖关系都已设置完毕,启动 Graph节点。 +3. 
Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,35 +71,35 @@ cargo run -p graph-node --release -- \ ### Kubernetes入门 -完整的Kubernetes示例配置可以在[索引人存储库](https://github.com/graphprotocol/indexer/tree/main/k8s)中找到。 +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### 端口 当运行Graph Node时,会暴露以下端口: -| 端口 | 用途 | 路径 | CLI 参数 | 环境 变量 | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP 服务
    (用于子图查询) | /subgraphs/id/...

    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (用于子图订阅) | /subgraphs/id/...

    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (用于管理部署) | / | --admin-port | - |
-| 8030 | 子图索引状态 API | /graphql | --index-node-port | - |
-| 8040 | Prometheus 指标 | /metrics | --metrics-port | - |

+| Port | Purpose | Routes | CLI Argument | Environment Variable |
+| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- |
+| 8000 | GraphQL HTTP server<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--http-port | - |
+| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | \--ws-port | - |
+| 8020 | JSON-RPC<br />(for managing deployments) | / | \--admin-port | - |
+| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - |
+| 8040 | Prometheus metrics | /metrics | \--metrics-port | - |

-> **重要**: 公开暴露端口时要小心 - **管理端口** 应保持锁定。 这包括下面详述的 Graph 节点 JSON-RPC 和索引人管理端点。
+> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint.

## 高级 Graph 节点配置

最简单的是,Graph节点可以使用Graph节点的单个实例、单个PostgreSQL数据库、IPFS节点和要索引的子图所需的网络客户端来操作。

-通过添加多个Graph节点和多个数据库以支持这些Graph节点,可以水平扩展此设置。高级用户可能希望通过`config.toml`文件和Graph节点的环境变量,利用Graph节点的一些水平扩展功能以及一些更高级的配置选项。
+This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables.

### `config.toml`

-[TOML](https://toml.io/en/)配置文件可用于设置比CLI中公开的配置更复杂的配置。文件的位置通过--config命令行开关传递。
+A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch.

> 使用配置文件时,不能使用选项--postgres-url、--postgres-secondary-hosts和--postgres-host-weights。

-可以提供最小的`config.toml`文件;以下文件等效于使用--postgres-url命令行选项:
+A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option:

```toml
[store]
[store.primary]
connection="<.. postgres-url argument ..>"
[deployment]
[deployment.rule]
shard = "primary"
indexers = [ "<.. list of all indexing nodes ..>" ]
```

-`config.toml`的完整文档可以在[Graph Node文档](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md)中找到。
+Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md).

#### 多个 Graph 节点

Graph Node indexing can scale horizontally, running multiple instances of Graph

#### 部署规则

-给定多个Graph节点,有必要管理新子图的部署,以便同一子图不会被两个不同的节点索引, 这会导致冲突。这可以通过使用部署规则来实现,如果正在使用数据库`shard`,部署规则还可以指定子图的数据应该存储在哪个分片中。部署规则可以与子图名称和部署所索引的网络相匹配,以便做出决策。
+Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions.
This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. 当您的现有数据库无法跟上Graph节点给它带来的负载时,以及当无法再增加数据库大小时,分片变得非常有用。 @@ -175,11 +175,11 @@ query = "" 在配置连接方面,首先将 postgresql.conf 中的 max_connections 设置为400(或甚至200),然后查看 store_connection_wait_time_ms 和 store_connecion_checkout_count Prometheus 度量。明显的等待时间(任何超过5ms的时间)表明可用连接太少;高等待时间也将由数据库非常繁忙(如高CPU负载)引起。然而,如果数据库在其他方面看起来很稳定,那么高等待时间表明需要增加连接数量。在配置中,每个graph节点实例可以使用的连接数是一个上限,如果不需要,Graph节点将不会保持连接打开。 -在[此处](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases)阅读有关存储配置的更多信息。 +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### 专用区块摄取 -如果配置了多个节点,则需要指定一个负责接收新区块的节点,这样所有配置的索引节点都不会轮询链头。这是作为`chains`命名空间的一部分完成的,指定用于区块摄取的`node_id`: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### 支持多个网络 -Graph协议正在增加支持索引奖励的网络数量,并且存在许多索引不支持的网络的子图,索引人希望处理这些子图。`config.toml`文件允许表达和灵活配置: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - 多个网络。 - 每个网络有多个提供程序(这可以允许跨提供程序分配负载,也可以允许配置完整节点和归档节点,如果给定的工作负载允许,Graph Node更喜欢便宜些的提供程序)。 - 其他提供商详细信息,如特征、身份验证和提供程序类型(用于实验性Firehose支持)。 -`[chains]`部分控制graph节点连接到的以太坊提供程序,以及每个链的区块和其他元数据的存储位置。以下示例配置了两个链,mainnet和kovan,其中mainnet的区块存储在vip分片中,而kovan的区块则存储在主分片中。主网链可以使用两个不同的提供商,而kovan只有一个提供商。 +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -在[此处](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers)阅读有关存储配置的更多信息。 +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### 环境变量 -Graph节点支持一系列环境变量,这些变量可以启用特征或更改Graph节点的行为。这些都记录在[这里](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md)。 +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). 
### 持续部署 使用高级配置操作缩放索引设置的用户可以从使用Kubernetes管理Graph节点中受益。 -- 索引人存储库有一个示例[Kubernetes引用](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro)是一个工具包,用于在GraphOps维护的Kubernetes上运行Graph Protocol Indexer。它提供了一组Helm图表和一个CLI来管理Graph Node部署。 +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### 管理Graph节点 @@ -229,23 +229,23 @@ Graph节点支持一系列环境变量,这些变量可以启用特征或更改 #### 日志 -Graph节点的日志可以为Graph节点和特定子图的调试和优化提供有用的信息。Graph节点通过`GRAPH_LOG`环境变量支持不同的日志级别,具有以下级别:错误、警告、信息、调试或跟踪。 +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -此外,将`GRAPH_LOG_QUERY_TIMING`设置为`gql`提供了有关GraphQL查询如何运行的更多详细信息(尽管这将生成大量日志)。 +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### 监控& 警报 +#### Monitoring & alerting 默认情况下,Graph Node通过8040端口上的Prometheus端点提供指标。然后可以使用Grafana来可视化这些指标。 -索引人存储库提供了[Grafana配置示例](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml)。 +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman`是Graph节点的维护工具,帮助诊断和解决不同的日常和异常任务。 +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -Graphman命令包含在官方容器中,您可以将docker exec插入到 Graph 节点容器中运行它。它需要一个`config.toml`文件。 +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Graph节点存储库中提供了`graphman`命令的完整文档。参见\[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md)Graph节点`/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### 使用子图 @@ -253,7 +253,7 @@ Graph节点存储库中提供了`graphman`命令的完整文档。参见\[/docs/ 默认情况下,在端口8030/graphql上可用,索引状态API公开了一系列方法,用于检查不同子图的索引状态、检查索引证明、检查子图特征等。 -[此处](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql)提供完整的模式。 +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
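
As an illustration of the indexing status API mentioned above, a status query can be issued with any HTTP client. This sketch assumes the default port 8030 from the ports table; the field names follow the linked schema, so check them against your version:

```sh
# Sketch: check health and chain-head lag for all deployments on this node.
curl -s -X POST http://localhost:8030/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ indexingStatuses { subgraph synced health chains { chainHeadBlock { number } latestBlock { number } } } }"}'
```
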
#### 索引性能 @@ -267,8 +267,8 @@ Graph节点存储库中提供了`graphman`命令的完整文档。参见\[/docs/ 索引速度慢的常见原因: -- 从链中查找相关事件所需的时间(特别是调用处理程序可能慢,因为依赖`trace_filter`) -- 将大量`eth_calls`作为处理程序的一部分 +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers - 执行期间大量的存储交互 - 要保存到存储的大量数据 - 要处理的大量事件 @@ -287,24 +287,24 @@ Graph节点存储库中提供了`graphman`命令的完整文档。参见\[/docs/ 在某些情况下,索引人可能会解决故障(例如,如果错误是由于没有正确类型的提供程序导致的,则添加所需的提供程序将允许继续索引)。然而,在其他情况下,需要更改子图代码。 -> 确定性故障被视为“最终”,并为故障区块生成索引证明,而非确定性故障则不是,因为子图可能会“可靠”并继续索引。在某些情况下,非确定性标签是不正确的,子图永远无法克服错误;此类故障应报告为Graph节点存储库中的问题。 +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### 区块和调用缓存 -Graph节点在存储中缓存某些数据,以保存来自提供程序的重新绘制。区块被缓存,`eth_calls`的结果也被缓存(后者作为特定区块被缓存)。这种缓存可以在稍微改变的子图的“重新同步”期间显著提高索引速度。 +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. -然而,在某些情况下,如果以太坊节点在一段时间内提供了错误的数据,这可能会进入缓存,导致错误的数据或失败子图。在这种情况下,索引人可以使用`graphman`清除不良的缓存,倒回受影响的子图,然后从(希望)健康提供程序获取新数据。 +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. 如果怀疑区块缓存不一致,例如tx收据丢失事件: -1. `graphman链列表`以查找链名称。 -2. `graphman chain check-blocks by-number ` 将检查缓存的块是否与提供程序匹配,如果不匹配,则从缓存中删除该块。 - 1. 如果存在差异,则使用`graphman chain truncate `截断整个缓存可能更安全。 +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. 如果区块与提供程序匹配,则可以直接针对提供程序调试问题。 #### 查询问题和错误 -一旦子图被索引,索引人就可以期望通过子图的专用查询端点来服务查询。如果索引人希望为大量查询量提供服务,建议使用专用查询节点,如果查询量非常大,索引人可能需要配置副本分片,以便查询不会影响索引过程。 +一旦子图被索引,索引器就可以期望通过子图的专用查询端点来服务查询。如果索引器希望为大量查询量提供服务,建议使用专用查询节点,如果查询量非常大,索引器可能需要配置副本分片,以便查询不会影响索引过程。 然而,即使使用专用的查询节点和副本,某些查询也可能需要很长时间才能执行,在某些情况下还会增加内存使用量,并对其他用户的查询时间产生负面影响。 @@ -312,7 +312,7 @@ Graph节点在存储中缓存某些数据,以保存来自提供程序的重新 ##### 查询缓存 -默认情况下,Graph 节点缓存GraphQL查询,这可以显著减少数据库负载。这可以进一步使用`GRAPH_QUERY_CACHE_BLOCKS`和`GRAPH_QUERY_CACHE-MAX_MEM`设置进行配置-请在[此处](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching)阅读更多信息。 +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). 
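
A minimal sketch of tuning the two cache settings named above via the environment; the values are placeholders rather than tuned recommendations, and the linked environment-variable docs describe their exact semantics and units:

```sh
# Sketch: size the GraphQL query cache before starting graph-node.
export GRAPH_QUERY_CACHE_BLOCKS=6
export GRAPH_QUERY_CACHE_MAX_MEM=10000
```
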
##### 分析查询 @@ -320,26 +320,26 @@ Graph节点在存储中缓存某些数据,以保存来自提供程序的重新 在其他情况下,触发因素可能是查询节点上的高内存使用率,在这种情况下,首要挑战是要确定导致问题的查询。 -索引器可以使用[qlog](https://github.com/graphprotocol/qlog/)来处理和汇总Grap​​h Node的查询日志。 `GRAPH_LOG_QUERY_TIMING` 也可以启用以帮助识别和调试慢速查询 +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. -针对慢查询,索引器有几个选项。当然,他们可以改变成本模型,显著增加发送有问题查询的成本。这可能导致该查询的频率降低。然而,这通常并不能解决问题的根本原因。 +针对慢查询,索引人有几个选项。当然,他们可以改变成本模型,显著增加发送有问题查询的成本。这可能导致该查询的频率降低。然而,这通常并不能解决问题的根本原因。 ##### 账户式优化 存储实体的数据库表通常有两种类型:“类交易”,即实体一旦创建,就永远不会更新,即存储类似于金融交易列表的内容;“类账户”,即经常更新实体,即存储每次记录交易时都会修改的类似金融账户的内容。类账户表的特点是,它们包含大量实体版本,但不同的实体相对较少。通常,在这种表中,不同实体的数量是行总数的1%(实体版本)。 -对于类似账户的表,`graph-node` 可以生成查询,利用Postgres如何以如此高的变化率存储数据的细节,即最近区块的所有版本都位于此类表的总存储的一小部分中。 +For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. -命令`graphman stats show 显示部署中每个实体类型/表的不同实体数量以及每个表包含的实体版本。该数据基于Postgres的内部估计,因此必然是不精确的,可能会偏离一个数量级。`entity`列中的`-1` 表示Postgres认为所有行都包含一个不同的实体。 +The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. -通常,不同实体的数量小于行/实体版本总数的1%的表是类似账户的优化的好候选表。当`graphman stats show`的输出表明表可能从该优化中受益时,运行`graphman stats show
    `将执行表的完整计数-这可能会很慢,但可以精确测量不同实体与整体实体版本的比率。 +In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show
    ` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions. -一旦一个表被确定为类似账户,运行`graphman stats account-like
    `将为针对该表的查询启用类似账户的优化。可以使用`graphman stats account-like --clear
    )`再次关闭优化查询节点,最多需要5分钟才能注意到优化已打开或关闭。打开优化后,需要验证更改实际上不会使该表的查询变慢。如果您已经将Grafana配置为监视Postgres,那么在`pg_stat_activity`中会出现大量缓慢的查询,需要几秒钟。在这种情况下,需要再次关闭优化。 +Once a table has been determined to be account-like, running `graphman stats account-like .
    ` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
    ` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -对于类似Uniswap的子图,`pair` 和 `token`表是这种优化的主要候选项,并且可以对数据库负载产生显著影响。 +For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. #### 删除子图 -> 这是一项新功能,将在Graph Node 0.29.x中提供。 +> 这是一项新功能,将在Graph节点0.29.x中提供。 -在某个时刻,索引人可能想要删除给定的子图。这可以通过删除部署及其所有索引数据的`graphman drop`, 轻松完成。部署可以被指定为子图名称、IPFS has、`Qm..`,或数据库命名空间`sgdNNN`。[此处](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop)提供了更多文档。 +At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From ca8c482aea4a312495d08d445cdedbdd66feb170 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:40 -0500 Subject: [PATCH 0128/1534] New translations graph-node.mdx (Urdu (Pakistan)) --- .../pages/ur/indexing/tooling/graph-node.mdx | 124 +++++++++--------- 1 file changed, 62 insertions(+), 62 deletions(-) diff --git a/website/src/pages/ur/indexing/tooling/graph-node.mdx b/website/src/pages/ur/indexing/tooling/graph-node.mdx index 53ed532c07f8..c1c097f2a228 100644 --- a/website/src/pages/ur/indexing/tooling/graph-node.mdx +++ b/website/src/pages/ur/indexing/tooling/graph-node.mdx @@ -1,16 +1,16 @@ --- -title: گراف نوڈ کو آپریٹ کرنا +title: گراف نوڈ --- گراف نوڈ وہ جزو ہے جو سب گراف کو انڈیکس کرتا ہے، اور نتیجے میں ڈیٹا کو GraphQL API کے ذریعے کیوری کے لیے دستیاب کرتا ہے. اس طرح یہ انڈیکسر اسٹیک میں مرکزی حیثیت رکھتا ہے، اور ایک کامیاب انڈیکسر چلانے کے لیے گراف نوڈ کا درست آپریشن بہت ضروری ہے. -یہ گراف نوڈ کا سیاق و سباق کا جائزہ فراہم کرتا ہے، اور انڈیکسرز کے لیے دستیاب کچھ زیادہ جدید اختیارات فراہم کرتا ہے. تفصیلی دستاویزات اور ہدایات [گراف نوڈ ریپوزٹری](https://github.com/graphprotocol/graph-node) میں مل سکتی ہیں. +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## گراف نوڈ -[گراف نوڈ](https://github.com/graphprotocol/graph-node) گراف نیٹ ورک پر سب گرافس کو انڈیکس کرنے، بلاکچین کلائنٹس سے منسلک کرنے، سب گرافس کو انڈیکس کرنے اور انڈیکس شدہ ڈیٹا کو دستیاب کرنے کے لیے حوالہ عمل ہے. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -گراف نوڈ (اور پورا انڈیکسر اسٹیک) ننگی بیئر میٹل پر، یا کلاؤڈ ماحول میں چلایا جا سکتا ہے. مرکزی انڈیکسنگ کے جزو کی یہ لچک گراف پروٹوکول کی مضبوطی کے لیے اہم ہے. 
اسی طرح، گراف نوڈ کو [سورس کے ذریعہ سے بنایا جا سکتا ہے](https://github.com/graphprotocol/graph-node)، یا انڈیکسرز [فراہم کردہ ڈوکر کی تصاویر](https://hub.docker.com/r/graphprotocol/graph-node) میں سے کسی ایک کو استعمال کر سکتے ہیں. +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL ڈیٹا بیس @@ -20,9 +20,9 @@ title: گراف نوڈ کو آپریٹ کرنا کسی نیٹ ورک کو انڈیکس کرنے کے لیے، گراف نوڈ کو EVM سے مطابقت رکھنے والے JSON-RPC API کے ذریعے نیٹ ورک کلائنٹ تک رسائی کی ضرورت ہے۔ یہ RPC کسی ایک کلائنٹ سے منسلک ہو سکتا ہے یا یہ زیادہ پیچیدہ سیٹ اپ ہو سکتا ہے جو متعدد پر بیلنس لوڈ کرتا ہے. -اگرچہ کچھ سب گراف کو صرف ایک مکمل نوڈ کی ضرورت ہو سکتی ہے، کچھ میں انڈیکسنگ کی خصوصیات ہوسکتی ہیں جن کے لیے اضافی RPC فعالیت کی ضرورت ہوتی ہے۔ خاص طور پر سب گراف جو `eth_calls` کو انڈیکسنگ کے حصے کے طور پر بناتے ہیں ان کے لیے آرکائیو نوڈ کی ضرورت ہوگی جو [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) کو سپورٹ کرتا ہو، اور `callHandlers`، یا `blockHandlers` کے ساتھ `call` فلٹر کے ساتھ سب گراف، `trace_filter` سپورٹ کی ضرورت ہے ([ٹریس ماڈیول کی دستاویزات یہاں دیکھیں](https://openethereum.github.io/JSONRPC-trace-module)). +While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**نیٹ ورک فائر ہوزز** - ایک فائر ہوز ایک gRPC سروس ہے جو ایک ترتیب شدہ، ابھی تک فورک-آگاہ، بلاکس کا سلسلہ فراہم کرتی ہے، جس کو گراف کے کور ڈویلپرز نے پیمانے پر پرفارمنس انڈیکسنگ کو بہتر طریقے سے سپورٹ کرنے کے لیے تیار کیا ہے۔ یہ فی الحال انڈیکسر کی ضرورت نہیں ہے، لیکن انڈیکسرز کو مکمل نیٹ ورک سپورٹ سے پہلے ٹیکنالوجی سے آشنا ہونے کی ترغیب دی جاتی ہے۔ فائر ہوز کے بارے میں مزید جانیں [یہاں](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS نوڈس @@ -32,25 +32,25 @@ title: گراف نوڈ کو آپریٹ کرنا نگرانی اور رپورٹنگ کو فعال کرنے کے لیے، گراف نوڈ اختیاری طور پر میٹرکس کو prometheus میٹرکس سرور پر لاگ کر سکتا ہے. -### سورس سے شروع کرنا +### Getting started from source -#### اولین ضروریات کو انسٹال کریں +#### Install prerequisites - **Rust** -- **Rust** +- **PostgreSQL** - **IPFS** -- **اوبنٹو کے صارفین کے لیے اضافی تقاضے** - اوبنٹو پر گراف نوڈ چلانے کے لیے چند اضافی پیکجوں کی ضرورت ہو سکتی ہے. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config ``` -#### سیٹ اپ +#### Setup -1. PostgreSQL ڈیٹا بیس سرور چالو کریں +1. 
Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. [گراف نوڈ](https://github.com/graphprotocol/graph-node) ریپو کلون کریں اور `cargo build` چلا کر سورس کو بلڈ کریں +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. اب جب کہ تمام انحصار سیٹ اپ ہو چکے ہیں، گراف نوڈ شروع کریں: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,35 +71,35 @@ cargo run -p graph-node --release -- \ ### Kubernetes کے ساتھ شروع کریں -ایک مکمل Kubernetes مثال کی کنفگریشن [انڈیکسر ریپوزٹری](https://github.com/graphprotocol/indexer/tree/main/k8s) میں مل سکتی ہے. +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### پورٹس جب یہ چل رہا ہوتا ہے گراف نوڈ مندرجہ ذیل پورٹس کو بے نقاب کرتا ہے: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (سب گراف کی کیوریز کے لیے) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (سب گراف سبسکرپشنز کے لیے) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (تعیناتیوں کے انتظام کے لیے) | / | --admin-port | - | -| 8030 | سب گراف انڈیکسنگ اسٹیٹس API | /graphql | --index-node-port | - | -| 8040 | Prometheus میٹرکس | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **اہم**: پورٹس کو عوامی طور پر ظاہر کرنے میں محتاط رہیں - **انتظامی پورٹس** کو بند رکھا جانا چاہیے. اس میں گراف نوڈ کا JSON-RPC اینڈ پوائنٹ شامل ہے. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## اعلی درجے کی گراف نوڈ کنفیگریشن اس کے آسان ترین طور پر، گراف نوڈ کو گراف نوڈ کے ایک انسٹینس, واحد PostgreSQL ڈیٹا بیس، ایک IPFS نوڈ، اور نیٹ ورک کلائنٹس کے ساتھ آپریٹ کیا جا سکتا ہے جیسا کہ سب گراف کو انڈیکس کرنے کے لیے ضرورت ہوتی ہے. -ان گراف نوڈس کو سپورٹ کرنے کے لیے ایک سے زیادہ گراف نوڈس، اور ایک سے زیادہ ڈیٹا بیس کو شامل کرکے اس سیٹ اپ کو افقی طور پر طور پر بڑھایا کیا جا سکتا ہے. اعلی درجے کے صارفین گراف نوڈ کی کچھ افقی اسکیلنگ صلاحیتوں کے ساتھ ساتھ `config.toml` فائل اور گراف نوڈ کے ماحولیاتی تغیرات کے ذریعے ترتیب دینے کے کچھ جدید اختیارات سے فائدہ اٹھا سکتے ہیں. +This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. ### `config.toml` -[TOML](https://toml.io/en/) کنفیگریشن فائل کو CLI میں بے نقاب ہونے والی کنفیگریشن سے زیادہ پیچیدہ کنفیگریشن سیٹ کرنے کے لیے استعمال کیا جا سکتا ہے. فائل کا مقام --config command line switch کے ساتھ پاس کیا جاتا ہے. +A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. > کنفیگریشن فائل استعمال کرتے وقت، --postgres-url، --postgres-secondary-hosts اور --postgres-host-weights کے آپشنز استعمال کرنا ممکن نہیں ہے. -ایک کم سے کم `config.toml` فائل فراہم کی جا سکتی ہے. درج ذیل فائل --postgres-url کمانڈ لائن آپشن کو استعمال کرنے کے مترادف ہے: +A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: ```toml [store] @@ -110,7 +110,7 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -`config.toml` کی مکمل دستاویزات [گراف نوڈ دستاویزات ](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md)میں مل سکتی ہیں. +Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### متعدد گراف نوڈس @@ -120,7 +120,7 @@ Graph Node indexing can scale horizontally, running multiple instances of Graph #### تعیناتی کے قواعد -ایک سے زیادہ گراف نوڈس کے پیش نظر، نئے سب گرافس کی تعیناتی کا انتظام کرنا ضروری ہے تاکہ ایک ہی سب گراف کو دو مختلف نوڈس کے ذریعہ انڈیکس نہ کیا جائے، جو تصادم کا باعث بنے. یہ تعیناتی کے قواعد کا استعمال کرتے ہوئے کیا جا سکتا ہے، جو یہ بھی بتا سکتا ہے کہ اگر ڈیٹا بیس کی شارڈنگ کا استعمال کیا جا رہا ہو تو سب گراف کا ڈیٹا کس `shard` میں محفوظ کیا جانا چاہیے. تعیناتی کے قواعد سب گراف کے نام اور اس نیٹ ورک پر مماثل ہو سکتے ہیں جس کو تعیناتی فیصلہ کرنے کے لیے انڈیکس کر رہی ہے. +Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. 
This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. مثال کی تعیناتی کے اصول کی کنفگریشن: @@ -150,7 +150,7 @@ indexers = [ ] ``` -تعیناتی کے قواعد کے بارے میں مزید [یہاں](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment) پڑھیں. +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). #### وقف شدہ کیوری نوڈس @@ -167,7 +167,7 @@ query = "" زیادہ تر استعمال کے معاملات میں، ایک واحد Postgres ڈیٹا بیس گراف نوڈ کی انسٹینس کو سپورٹ کرنے کے لیے کافی ہے. جب ایک گراف نوڈ کی انسٹینس ایک واحد postgres ڈیٹا بیس سے بڑھ جاتی ہے، تو یہ ممکن ہے کہ گراف نوڈ کے ڈیٹا کے ذخیرہ کو متعدد پوسٹگریس ڈیٹا بیس میں تقسیم کیا جا سکے. تمام ڈیٹا بیس مل کر گراف نوڈ انسٹینس کا اسٹور بناتے ہیں. ہر انفرادی ڈیٹا بیس کو شارڈ کہا جاتا ہے. -شارڈز کو ایک سے زیادہ ڈیٹا بیسز میں سب گراف کی تعیناتیوں کو تقسیم کرنے کے لیے استعمال کیا جا سکتا ہے، اور ڈیٹا بیس میں کیوریز کے بوجھ کو پھیلانے کے لیے نقلیں استعمال کرنے کے لیے بھی استعمال کیا جا سکتا ہے. اس میں دستیاب ڈیٹا بیس کنکشنز کی تعداد کو ترتیب دینا شامل ہے جو ہر ایک `graph-node` کو ہر ڈیٹا بیس کے لیے اپنے کنکشن پول میں رکھنا چاہیے، جو کہ تیزی سے اہم ہو جاتا ہے کیونکہ مزید سب گرافس کو انڈیکس کیا جا رہا ہے. +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. شارڈنگ مفید ہو جاتا ہے جب آپ کا موجودہ ڈیٹا بیس اس بوجھ کو برقرار نہیں رکھ سکتا جو گراف نوڈ اس پر ڈالتا ہے، اور جب ڈیٹا بیس کے سائز کو مزید بڑھانا ممکن نہ ہو. @@ -175,11 +175,11 @@ query = "" کنکشن کنفیگر کرنے کے معاملے میں، postgresql.conf میں max_connections کے ساتھ شروع کریں 400 سیٹ کریں(یا شاید 200 بھی) اور store_connection_wait_time_ms اور store_connection_checkout_count Prometheus میٹرکس دیکھیں. قابل توجہ انتظار کے اوقات (5ms سے اوپر کی کوئی بھی چیز) اس بات کا اشارہ ہے کہ بہت کم کنکشن دستیاب ہیں; زیادہ انتظار کا وقت بھی ڈیٹا بیس کے بہت مصروف ہونے کی وجہ سے ہوگا (جیسے زیادہ CPU لوڈ). تاہم اگر ڈیٹا بیس بصورت دیگر مستحکم معلوم ہوتا ہے تو، زیادہ انتظار کے اوقات کنکشن کی تعداد بڑھانے کی ضرورت کی نشاندہی کرتے ہیں. کنفیگریشن میں، ہر گراف نوڈ انسٹینس کتنے کنکشن استعمال کر سکتا ہے ایک بالائی حد ہے، اور اگر گراف نوڈ کو ان کی ضرورت نہ ہو تو کنکشن کو کھلا نہیں رکھے گا. -اسٹور کنفیگریشن کے بارے میں مزید [یہاں](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases) پڑھیں. +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### وقف شدہ بلاک انجیش -اگر ایک سے زیادہ نوڈس کنفیگر کیے گئے ہیں، تو ایک نوڈ کو مختص کرنا ضروری ہو گا جو نئے بلاکس کے ادخال کے لیے ذمہ دار ہے، تاکہ تمام کنفیگر کیے گئے انڈیکس نوڈس چین ہیڈ کو پولنگ نہیں کر رہے ہوں. یہ `chains` namespace کے حصے کے طور پر کیا جاتا ہے، جس میں `node_id` کو بلاک ادخال کے لیے استعمال کیا جائے گا: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. 
This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### متعدد نیٹ ورکس کو سپورٹ کرنا -گراف پروٹوکول انعامات کی انڈیکسنگ کے لیے تعاون یافتہ نیٹ ورکس کی تعداد میں اضافہ کر رہا ہے، اور ایسے بہت سے سب گراف موجود ہیں جو غیر تعاون یافتہ نیٹ ورکس کو ترتیب دیتے ہیں جن کو ایک انڈیکسر عمل میں لانا چاہے گا. `config.toml` فائل اس کی تاثراتی اور لچکدار کنفیگریشن کی اجازت دیتی ہے: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - متعدد نیٹ ورکس - ایک سے زیادہ فراہم کنندگان فی نیٹ ورک (یہ فراہم کنندگان میں بوجھ کو تقسیم کرنے کی اجازت دے سکتا ہے، اور مکمل نوڈس کے ساتھ ساتھ آرکائیو نوڈس کی ترتیب کی بھی اجازت دے سکتا ہے، گراف نوڈ سستے فراہم کنندگان کو ترجیح دیتا ہے اگر کام کا بوجھ اجازت دیتا ہے). - فراہم کنندہ کی اضافی تفصیلات، جیسے خصوصیات، تصدیق اور فراہم کنندہ کی قسم (تجرباتی firehose سپورٹ کے لیے) -`[chains]` سیکشن ایتھیریم فراہم کنندگان کو کنٹرول کرتا ہے جن سے گراف نوڈ جڑتا ہے، اور جہاں ہر چین کے لیے بلاکس اور دیگر میٹا ڈیٹا کو محفوظ کیا جاتا ہے. مندرجہ ذیل مثال دو چینز کو ترتیب دیتی ہے، مین نیٹ اور کووان، جہاں مین نیٹ کے لیے بلاکس کو vip شارڈ میں اور کووان کے لیے بلاکس کو پرائمری شارڈ میں محفوظ کیا جاتا ہے. مین نیٹ چین دو مختلف فراہم کنندگان کا استعمال کر سکتا ہے، جبکہ کووان کے پاس صرف ایک فراہم کنندہ ہے. +The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [chains] @@ -210,18 +210,18 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -فراہم کنندہ کنفیگریشن کے بارے میں مزید پڑھیں [یہاں](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### ماحولیاتی تغیرات -گراف نوڈ ماحولیاتی تغیرات کی ایک حد کو سپورٹ کرتا ہے جو خصوصیات کو فعال کر سکتا ہے، یا گراف نوڈ کے رویہ کو تبدیل کر سکتا ہے. یہ [یہاں](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) درج ہیں. +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### مسلسل تعیناتی وہ صارفین جو اعلی درجے کی ترتیب کے ساتھ سکیلڈ انڈیکسنگ سیٹ اپ چلا رہے ہیں وہ اپنے گراف نوڈس کو Kubernetes کے ساتھ منظم کرنے سے فائدہ اٹھا سکتے ہیں. -- انڈیکسر ریپوزٹری میں ایک [مثال Kubernetes حوالہ](https://github.com/graphprotocol/indexer/tree/main/k8s) ہے -- [Launchpad](https://docs.graphops.xyz/launchpad/intro), GraphOps کے زیر انتظام Kubernetes پر گراف پروٹوکول انڈیکسر چلانے کے لیے ایک toolkit ہے. یہ گراف نوڈ کی تعیناتی کو منظم کرنے کے لیے Helm charts اور ایک CLI فراہم کرتا ہے. 
+- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### گراف نوڈ کا انتظام @@ -229,23 +229,23 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] #### لاگنگ -گراف نوڈ کے لاگز گراف نوڈ اور مخصوص سب گراف کی ڈیبگنگ اور اصلاح کے لیے مفید معلومات فراہم کر سکتے ہیں. گراف نوڈ `GRAPH_LOG` ماحولیاتی تغیرات کے ذریعے درج ذیل سطحوں کے ساتھ مختلف لاگ سطح کو سپورٹ کرتا ہے: error، warn، info، debug یا trace. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -اس کے علاوہ `GRAPH_LOG_QUERY_TIMING` کو `gql` پر سیٹ کرنا اس بارے میں مزید تفصیلات فراہم کرتا ہے کہ GraphQL کی کیوریز کیسے چل رہی ہیں (حالانکہ یہ logs کی ایک بڑی مقدار پیدا کرے گا). +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). -#### نگرانی اور انتباہ +#### Monitoring & alerting گراف نوڈ بطور ڈیفالٹ 8040 port پر prometheus endpoint کے ذریعے میٹرکس فراہم کرتا ہے. Grafana کو پھر ان metrics کو دیکھنے کے لیے استعمال کیا جا سکتا ہے. -انڈیکسر ریپوزٹری ایک [مثال Grafana کنفگریشن](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml) فراہم کرتی ہے. +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -` graphman` گراف نوڈ کے لیے ایک مینٹیننس ٹول ہے، جو روزانہ مختلف اور غیر معمولی کاموں کی تشخیص اور حل میں مدد کرتا ہے. +`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. -گراف مین کمانڈ آفیشل کنٹینرز میں شامل ہے، اور آپ اسے چلانے کے لیے اپنے گراف نوڈ کنٹینر میں docker exec کر سکتے ہیں. اس کے لیے ایک `config.toml` فائل درکار ہے. +The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -`graphman` کمانڈز کی مکمل دستاویزات گراف نوڈ ریپوزٹری میں دستیاب ہیں. گراف نوڈ `/docs` \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) میں دیکھیں +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### سب گرافس کے ساتھ کام کرنا @@ -253,7 +253,7 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] پورٹ 8030/graphql پر بطور ڈیفالٹ دستیاب ہے، انڈیکسنگ اسٹیٹس API مختلف سب گرافس کے لیے انڈیکسنگ کی حیثیت کو جانچنے، انڈیکسنگ کے ثبوتوں کی جانچ، سب گراف کی خصوصیات کا معائنہ کرنے اور مزید بہت سے طریقوں کو ظاہر کرتا ہے. -مکمل اسکیما [یہاں](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) دستیاب ہے. +The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
#### انڈیکسنگ کی کارکردگی @@ -267,8 +267,8 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] انڈیکسنگ میں سستی کی عام وجوہات: -- چین سے متعلقہ واقعات کو تلاش کرنے میں لگنے والا وقت (خاص طور پر کال ہینڈلرز سست ہوسکتے ہیں، `trace_filter` پر انحصار کے پیش نظر) -- ہینڈلرز کے حصے کے طور پر بڑی تعداد میں `eth_calls` بنانا +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) +- Making large numbers of `eth_calls` as part of handlers - عمل درآمد کے دوران سٹور کے تعامل کی ایک بڑی مقدار - اسٹور میں محفوظ کرنے کے لیے ڈیٹا کی ایک بڑی مقدار - کارروائی کرنے کے لیے ایوینٹس کی ایک بڑی تعداد @@ -287,19 +287,19 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] بعض صورتوں میں ایک ناکامی کو انڈیکسر کے ذریعے حل کیا جا سکتا ہے (مثال کے طور پر اگر غلطی صحیح قسم کا فراہم کنندہ نہ ہونے کا نتیجہ ہے، مطلوبہ فراہم کنندہ کو شامل کرنے سے انڈیکسنگ جاری رہے گی). تاہم دوسری صورتوں میں، سب گراف کوڈ میں تبدیلی کی ضرورت ہے. -> ڈیٹرمنسٹک ناکامیوں کو "حتمی" سمجھا جاتا ہے، جس میں فیلنگ بلاک کے لیے انڈیکسنگ کا ثبوت تیار کیا جاتا ہے، جب کہ نان ڈیٹرمنسٹک ناکامیاں نہیں ہوتی ہیں، کیونکہ سب گراف "نا ناکام" ہونے کا انتظام کر سکتا ہے اور انڈیکسنگ جاری رکھ سکتا ہے. بعض صورتوں میں، غیر متعین لیبل غلط ہے، اور سب گراف کبھی بھی غلطی پر قابو نہیں پائے گا. اس طرح کی ناکامیوں کو گراف نوڈ ریپوزٹری پر مسائل کے طور پر رپورٹ کیا جانا چاہئے. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### کیشے -فراہم کنندہ سے ری فیچنگ کو بچانے کے لیے گراف نوڈ اسٹور میں مخصوص ڈیٹا کیشے کر لیتا ہے. بلاکس کیش کیے جاتے ہیں، جیسا کہ `eth_calls` کے نتائج ہیں (مؤخر الذکر کو مخصوص بلاک کے طور پر کیش کیا جاتا ہے). یہ کیشنگ قدرے تبدیل شدہ سب گراف کی "دوبارہ مطابقت پذیری" کے دوران انڈیکسنگ کی رفتار کو شدید طور پر بڑھا سکتی ہے. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. -تاہم، بعض صورتوں میں، اگر ایتھیریم نوڈ نے کچھ مدت کے لیے غلط ڈیٹا فراہم کیا ہے، تو یہ کیش میں اپنا راستہ بنا سکتا ہے، جس کے نتیجے میں غلط ڈیٹا یا ناکام سب گرافس ہوتے ہیں۔ اس معاملے میں انڈیکسرز زہریلے کیشے کو صاف کرنے کے لیے `graphman` کا استعمال کر سکتے ہیں، اور پھر متاثرہ سب گراف کو ریوائنڈ کر سکتے ہیں، جو پھر (امید ہے) صحت مند فراہم کنندہ سے تازہ ڈیٹا حاصل کرے گا. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. اگر کسی بلاک کیشے کی عدم مطابقت کا شبہ ہے، جیسے کہ tx رسید غائب ہونے کا ایوینٹ: -1. چین کا نام تلاش کرنے کے لیے `graphman chain list`. -2. `graphman chain check-blocks by-number ` چیک کرے گا کہ آیا کیش شدہ بلاک فراہم کنندہ سے مماثل ہے، اور اگر ایسا نہیں ہوتا ہے تو بلاک کو کیشے سے حذف کر دیتا ہے. - 1. 
اگر کوئی فرق ہے تو، `graphman chain truncate ` کے ساتھ پورے کیش کو تراشنا زیادہ محفوظ ہوسکتا ہے. +1. `graphman chain list` to find the chain name. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. اگر بلاک فراہم کنندہ سے میل کھاتا ہے، تو مسئلہ کو براہ راست فراہم کنندہ کے خلاف ڈیبگ کیا جا سکتا ہے. #### مسائل اور غلطیوں کو کیوری کرنا @@ -312,7 +312,7 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] ##### کیوری کی کیشنگ -گراف نوڈ پہلے سے طے شدہ طور پر GraphQL کی کیوریز کو محفوظ کرتا ہے، جو ڈیٹا بیس کے بوجھ کو نمایاں طور پر کم کر سکتا ہے. اسے `GRAPH_QUERY_CACHE_BLOCKS` اور `GRAPH_QUERY_CACHE_MAX_MEM` ترتیبات کے ساتھ مزید ترتیب دیا جا سکتا ہے. مزید [یہاں](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching) پڑھیں. +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### کیوریز کا تجزیہ کرنا @@ -320,7 +320,7 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] دوسری صورتوں میں، مسئلہ ایک کیوری نوڈ پر زیادہ میموری کا استعمال ہو سکتا ہے، ایسی صورت میں چیلنج سب سے پہلے اس کیوری کی نشاندہی کرنا ہے جس کی وجہ سے مسئلہ ہے. -انڈیکسرز گراف نوڈ کے کیوری کے کیوری لاگز پر کارروائی اور خلاصہ کرنے کے لیے [qlog](https://github.com/graphprotocol/qlog/) کا استعمال کر سکتے ہیں. `GRAPH_LOG_QUERY_TIMING` کو بھی سست کیوریز کی شناخت اور ڈیبگ کرنے میں مدد کے لیے فعال کیا جا سکتا ہے. +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. سست کیوریز کو دیکھتے ہوئے، انڈیکسرز کے پاس کچھ آپشنز ہوتے ہیں. بلاشبہ وہ اپنے لاگت ماڈل کو تبدیل کر سکتے ہیں، تاکہ مسئلہ کرنے والی کیوری کو بھیجنے کی لاگت میں نمایاں اضافہ ہو سکے. اس کے نتیجے میں اس کیوری کی تعدد میں کمی واقع ہوسکتی ہے. تاہم یہ اکثر مسئلے کی بنیادی وجہ کو حل نہیں کرتا ہے. @@ -328,18 +328,18 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] ڈیٹا بیس ٹیبلز جو اینٹیٹیز کو ذخیرہ کرتے ہیں عام طور پر دو قسموں میں آتے ہیں: 'transaction-like'، جہاں اینٹیٹیز کو، ایک بار بنائے جانے کے بعد، کبھی اپ ڈیٹ نہیں کیا جاتا، یعنی، وہ مالیاتی ٹرانزیکشن کی فہرست کے مشابہ کچھ ذخیرہ کرتے ہیں، اور 'account-like' جہاں اینٹیٹیز کو اکثر اپ ڈیٹ کیا جاتا ہے، یعنی، وہ مالیاتی اکاؤنٹس کی طرح کچھ ذخیرہ کرتے ہیں جو ہر بار جب کوئی ٹرانزیکشن ریکارڈ ہوتا ہے تو ان میں ترمیم کی جاتی ہے. اکاؤنٹ جیسی ٹیبلز اس حقیقت کی خصوصیت رکھتی ہیں کہ ان میں بڑی تعداد میں اینٹیٹی کے ورژنز ہوتے ہیں، لیکن نسبتاً کم الگ الگ اینٹیٹیز ہیں. اکثر، ایسے ٹیبلز میں الگ الگ اینٹیٹیز کی تعداد قطاروں کی کل تعداد کا 1% ہوتی ہے (اینٹیٹی کے ورژنز) -اکاؤنٹ جیسا کے ٹیبلز کے لیے، `graph-node` ایسی کیوریز پیدا کر سکتا ہے جو اس تفصیلات سے فائدہ اٹھاتے ہیں کہ Postgres اتنی زیادہ شرح کے ساتھ ڈیٹا کو کیسے ذخیرہ کرتا ہے، یعنی کہ حالیہ بلاکس کے تمام ورژنز اس طرح کے ٹیبل کے لئے مجموعی اسٹوریج کا ایک چھوٹا ذیلی حصہ. 
+For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. -کمانڈ `graphman stats show ظاہر کرتا ہے، ایک تعیناتی میں ہر ہستی کی قسم/ٹیبل کے لیے، کتنی الگ الگ ہستیوں، اور ہر ٹیبل میں ہستی کے کتنے ورژنز ہیں. یہ ڈیٹا Postgres کے اندرونی اندازوں پر مبنی ہے، اور اس لیے ضروری طور پر غلطی کا امکان ہے، اور شدت کے توازن سے پھر سکتا ہے. `entities` کالم میں ایک `-1` کا مطلب ہے کہ Postgres کا خیال ہے کہ تمام قطاریں ایک الگ ہستی پر مشتمل ہیں. +The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. -عام طور پر، وہ ٹیبلز جہاں الگ الگ ہستیوں کی تعداد قطاریں/ہستی ورژنز کی کل تعداد کے 1% سے کم ہوتی ہے account-like کی آپٹیمائزیشن کے لیے اچھے امیدوار ہوتے ہیں. جب `graphman stats` کا آؤٹ پٹ اشارہ کرتا ہے کہ ایک ٹیبل اس اصلاح سے فائدہ اٹھا سکتا ہے، تو `graphman stats show
    ` کو چلانے سے ٹیبل کی مکمل گنتی ہوگی. جو سست ہو سکتا ہے، لیکن اینٹیٹی کے مجموعی ورژنز کے لیے الگ الگ اینٹیٹیز کے تناسب کا ایک درست پیمانہ فراہم کرتا ہے. +In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show
    ` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions. -ایک بار جب ایک ٹیبل اکاؤنٹ جیسا ہونے کا تعین کر لیا جاتا ہے، تو `graphman stats account-like .
    ` چلانے سے اس ٹیبل کے خلاف کیوریز کے لیے اکاؤنٹ کی طرح کی آپٹیمائزیشن آن ہو جائے گی. آپٹیمائزیشن کو دوبارہ `graphman stats account-like --clear .
    ` کے ساتھ دوبارہ بند کیا جا سکتا ہے یہ معلوم کرنے میں کیوری نوڈس کو 5 منٹ تک کا وقت لگتا ہے کہ آپٹمائزیشن آن کر دی گئی ہے یا بند. آپٹیمائزیشن کو آن کرنے کے بعد، اس بات کی تصدیق کرنا ضروری ہے کہ تبدیلی درحقیقت اس ٹیبل کے لیے کیوریز کو آہستہ نہیں کرتی ہے. اگر آپ نے Postgres کی نگرانی کے لیے گرافانا کو کنفیگر کیا ہے، تو سست کیوریز `pg_stat_activity` میں بڑی تعداد میں ظاہر ہوں گے، جس میں کئی سیکنڈ لگیں گے. اس صورت میں، آپٹیمائزیشن کو دوبارہ بند کرنے کی ضرورت ہے. +Once a table has been determined to be account-like, running `graphman stats account-like .
    ` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
    ` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -یونی سویپ جیسے سب گرافس کے لیے، `pair` اور `token` ٹیبلز اس آپٹیمائزیشن کے لیے اہم امیدوار ہیں، اور ڈیٹا بیس کے بوجھ پر کافی اثر ڈال سکتے ہیں. +For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. #### سب گراف کو ختم کرنا > یہ نئی فعالیت ہے، جو گراف نوڈ 0.29.x میں دستیاب ہوگی -کسی موقع پر ایک انڈیکسر شاید دیئے گئے سب گراف کو ختم کرنا چاہے گا. یہ آسانی سے `graphman drop` کے ذریعے کیا جا سکتا ہے، جو ایک تعیناتی اور اس کے تمام انڈیکس شدہ ڈیٹا کو ختم کر دیتا ہے. تعیناتی کو یا تو سب گراف کے نام، IPFS ہیش `Qm..`، یا ڈیٹا بیس namespace کی جگہ `sgdNNN` کے طور پر بیان کیا جا سکتا ہے. مزید دستاویزات [یہاں](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) دستیاب ہیں. +At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). From f6d4c65d8a96591d7bdad176d4da1d6f5292786c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:41 -0500 Subject: [PATCH 0129/1534] New translations graph-node.mdx (Vietnamese) --- .../pages/vi/indexing/tooling/graph-node.mdx | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/website/src/pages/vi/indexing/tooling/graph-node.mdx b/website/src/pages/vi/indexing/tooling/graph-node.mdx index 82cebd402554..6a27301b680b 100644 --- a/website/src/pages/vi/indexing/tooling/graph-node.mdx +++ b/website/src/pages/vi/indexing/tooling/graph-node.mdx @@ -1,5 +1,5 @@ --- -title: Operating Graph Node +title: Graph Node --- Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -32,9 +32,9 @@ Subgraph deployment metadata is stored on the IPFS network. The Graph Node prima To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. -### Bắt đầu từ nguồn +### Getting started from source -#### Cài đặt điều kiện tiên quyết +#### Install prerequisites - **Rust** @@ -42,15 +42,15 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Yêu cầu bổ sung cho người dùng Ubuntu** - Để chạy Graph Node trên Ubuntu, có thể cần một số gói bổ sung. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config ``` -#### Cài đặt +#### Setup -1. Khởi động máy chủ cơ sở dữ liệu PostgreSQL +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. 
Nhân bản [Graph Node](https://github.com/graphprotocol/graph-node) repo và xây dựng nguồn bằng cách chạy `cargo build` +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. Bây giờ tất cả các phụ thuộc đã được thiết lập, hãy khởi động Graph Node: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Cổng | Mục đích | Tuyến | Đối số CLI | Biến môi trường | -| --- | --- | --- | --- | --- | -| 8000 | Máy chủ GraphQL HTTP
    (cho các truy vấn subgraph) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (cho các đăng ký subgraph) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (để quản lý triển khai) | / | --admin-port | - | -| 8030 | API trạng thái lập chỉ mục Subgraph | /graphql | --index-node-port | - | -| 8040 | Số liệu Prometheus | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. @@ -245,7 +245,7 @@ The indexer repository provides an [example Grafana configuration](https://githu The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. -Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` ### Working with subgraphs @@ -287,7 +287,7 @@ During indexing subgraphs might fail, if they encounter data that is unexpected, In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### Block and call cache From a6740cf428ab3e078fcec696cf77f441bcb4ec93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:42 -0500 Subject: [PATCH 0130/1534] New translations graph-node.mdx (Marathi) --- .../pages/mr/indexing/tooling/graph-node.mdx | 140 +++++++++--------- 1 file changed, 70 insertions(+), 70 deletions(-) diff --git a/website/src/pages/mr/indexing/tooling/graph-node.mdx b/website/src/pages/mr/indexing/tooling/graph-node.mdx index 907114f75be5..168544ca640a 100644 --- a/website/src/pages/mr/indexing/tooling/graph-node.mdx +++ b/website/src/pages/mr/indexing/tooling/graph-node.mdx @@ -1,20 +1,20 @@ --- -title: ऑपरेटिंग ग्राफ नोड +title: आलेख नोड --- -ग्राफ नोड हा घटक आहे जो सबग्राफ इंडेक्स करतो आणि परिणामी डेटा GraphQL API द्वारे क्वेरीसाठी उपलब्ध करतो. हे इंडेक्सर स्टॅकसाठी मध्यवर्ती आहे आणि यशस्वी इंडेक्सर चालवण्यासाठी ग्राफ नोडचे योग्य ऑपरेशन महत्वाचे आहे. +Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. -हे ग्राफ नोडचे संदर्भित विहंगावलोकन आणि इंडेक्सर्ससाठी उपलब्ध काही अधिक प्रगत पर्याय प्रदान करते. 
तपशीलवार दस्तऐवजीकरण आणि सूचना [ग्राफ नोड भांडार](https://github.com/graphprotocol/graph-node) मध्ये आढळू शकतात. +This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). ## आलेख नोड -[ग्राफ नोड](https://github.com/graphprotocol/graph-node) हे ग्राफ नेटवर्कवर सबग्राफ अनुक्रमित करण्यासाठी, ब्लॉकचेन क्लायंटशी कनेक्ट करण्यासाठी, सबग्राफ अनुक्रमित करण्यासाठी आणि अनुक्रमित डेटा उपलब्ध करण्यासाठी संदर्भ अंमलबजावणी आहे पृच्छणे. +[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. -ग्राफ नोड (आणि संपूर्ण इंडेक्सर स्टॅक) बेअर मेटलवर किंवा क्लाउड वातावरणात चालवला जाऊ शकतो. केंद्रीय अनुक्रमणिका घटकाची ही लवचिकता आलेख प्रोटोकॉलच्या मजबुतीसाठी महत्त्वपूर्ण आहे. त्याचप्रमाणे, आलेख नोड [स्रोत पासून तयार](https://github.com/graphprotocol/graph-node) केला जाऊ शकतो किंवा अनुक्रमणिका [डॉकर प्रतिमा प्रदान केल्या](https:// पैकी एक वापरू शकतात hub.docker.com/r/graphprotocol/graph-node). +Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL डेटाबेस -ग्राफ नोडसाठी मुख्य स्टोअर, येथे सबग्राफ डेटा संग्रहित केला जातो, तसेच सबग्राफबद्दलचा मेटाडेटा आणि सबग्राफ-अज्ञेयवादी नेटवर्क डेटा जसे की ब्लॉक कॅशे आणि eth_call कॅशे. +The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. ### नेटवर्क क्लायंट @@ -26,7 +26,7 @@ While some subgraphs may just require a full node, some may have indexing featur ### आयपीएफएस नोड्स -सबग्राफ डिप्लॉयमेंट मेटाडेटा आयपीएफएस नेटवर्कवर संग्रहित केला जातो. सबग्राफ मॅनिफेस्ट आणि सर्व लिंक केलेल्या फाइल्स आणण्यासाठी सबग्राफ डिप्लॉयमेंट दरम्यान ग्राफ नोड प्रामुख्याने आयपीएफएस नोडमध्ये प्रवेश करतो. नेटवर्क इंडेक्सर्सना त्यांचे स्वतःचे IPFS नोड होस्ट करण्याची आवश्यकता नाही. नेटवर्कसाठी एक IPFS नोड https://ipfs.network.thegraph.com वर होस्ट केला आहे. +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### प्रोमिथियस मेट्रिक्स सर्व्हर @@ -36,7 +36,7 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P #### Install prerequisites -- **गंज** +- **Rust** - **PostgreSQL** @@ -73,29 +73,29 @@ cargo run -p graph-node --release -- \ A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). -### बंदरे +### Ports When it is running Graph Node exposes the following ports: -| बंदर | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **महत्त्वाचे**: पोर्ट सार्वजनिकपणे उघड करण्याबाबत सावधगिरी बाळगा - **प्रशासन पोर्ट** लॉक डाउन ठेवले पाहिजेत. यामध्ये ग्राफ नोड JSON-RPC एंडपॉइंटचा समावेश आहे. +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. ## प्रगत ग्राफ नोड कॉन्फिगरेशन -सर्वात सोप्या पद्धतीने, ग्राफ नोड, ग्राफ नोड, एकल PostgreSQL डेटाबेस, एक IPFS नोड, आणि सबग्राफ अनुक्रमित करण्यासाठी आवश्यक असलेल्या नेटवर्क क्लायंटच्या एकाच उदाहरणासह ऑपरेट केला जाऊ शकतो. +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. -हा सेटअप क्षैतिजरित्या स्केल केला जाऊ शकतो, अनेक ग्राफ नोड्स आणि त्या ग्राफ नोड्सला समर्थन देण्यासाठी एकाधिक डेटाबेस जोडून. प्रगत वापरकर्ते ग्राफ नोडच्या काही क्षैतिज स्केलिंग क्षमतांचा, तसेच काही अधिक प्रगत कॉन्फिगरेशन पर्यायांचा लाभ घेऊ इच्छितात, `config.toml` फाइल आणि ग्राफ नोडच्या पर्यावरणीय चलने. +This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. ### `config.toml` -[TOML](https://toml.io/en/) कॉन्फिगरेशन फाईल CLI मध्ये उघड केलेल्या कॉन्फिगरेशनपेक्षा अधिक जटिल कॉन्फिगरेशन सेट करण्यासाठी वापरली जाऊ शकते. फाइलचे स्थान --config कमांड लाइन स्विचसह पास केले जाते. +A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. > When using a configuration file, it is not possible to use the options --postgres-url, --postgres-secondary-hosts, and --postgres-host-weights. @@ -120,7 +120,7 @@ Graph Node indexing can scale horizontally, running multiple instances of Graph #### डिप्लॉयमेंट नियम -एकापेक्षा जास्त आलेख नोड्स दिल्यास, नवीन सबग्राफची तैनाती व्यवस्थापित करणे आवश्यक आहे जेणेकरून समान सबग्राफ दोन भिन्न नोड्सद्वारे अनुक्रमित केला जाणार नाही, ज्यामुळे टक्कर होईल. हे उपयोजन नियम वापरून केले जाऊ शकते, जे डेटाबेस शार्डिंग वापरत असल्यास, सबग्राफचा डेटा कोणत्या `shard` मध्ये संग्रहित केला जावा हे देखील निर्दिष्ट करू शकते. डिप्लॉयमेंट नियम उपग्राफ नाव आणि नेटवर्कवर जुळू शकतात जे निर्णय घेण्यासाठी उपयोजन अनुक्रमित करत आहे. +Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. उपयोजन नियम कॉन्फिगरेशनचे उदाहरण: @@ -150,7 +150,7 @@ indexers = [ ] ``` -तैनाती नियमांबद्दल [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment) अधिक वाचा. +Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). 
#### समर्पित क्वेरी नोड्स @@ -165,21 +165,21 @@ Any node whose --node-id matches the regular expression will be set up to only r #### शार्डिंगद्वारे डेटाबेस स्केलिंग -बर्‍याच वापराच्या प्रकरणांसाठी, ग्राफ-नोड उदाहरणास समर्थन देण्यासाठी एकच पोस्टग्रेस डेटाबेस पुरेसा आहे. जेव्हा ग्राफ-नोडचे उदाहरण एका पोस्टग्रेस डेटाबेसला मागे टाकते, तेव्हा ग्राफ-नोडच्या डेटाचे स्टोरेज एकाधिक पोस्टग्रेस डेटाबेसमध्ये विभाजित करणे शक्य आहे. सर्व डेटाबेस एकत्रितपणे आलेख-नोड उदाहरणाचे स्टोअर तयार करतात. प्रत्येक वैयक्तिक डेटाबेसला शार्ड म्हणतात. +For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. -शार्ड्सचा वापर एकाधिक डेटाबेसमध्ये सबग्राफ डिप्लॉयमेंट्स विभाजित करण्यासाठी केला जाऊ शकतो आणि डेटाबेसमध्ये क्वेरी लोड पसरवण्यासाठी प्रतिकृती वापरण्यासाठी देखील वापरला जाऊ शकतो. यामध्ये प्रत्येक `ग्राफ-नोड` ने प्रत्येक डेटाबेससाठी त्याच्या कनेक्शन पूलमध्ये ठेवल्या पाहिजेत अशा उपलब्ध डेटाबेस कनेक्शनची संख्या कॉन्फिगर करणे समाविष्ट आहे, जे अधिक सबग्राफ अनुक्रमित केले जात असल्याने वाढत्या प्रमाणात महत्त्वाचे होते. +Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. -जेव्हा तुमचा विद्यमान डेटाबेस ग्राफ नोडवर ठेवत असलेल्या लोडसह ठेवू शकत नाही आणि जेव्हा डेटाबेस आकार वाढवणे शक्य नसेल तेव्हा शार्डिंग उपयुक्त ठरते. +Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. -> शार्ड्ससह प्रारंभ करण्यापूर्वी एकच डेटाबेस शक्य तितका मोठा करणे सामान्यतः चांगले आहे. एक अपवाद असा आहे की जिथे क्वेरी ट्रॅफिक सबग्राफ्समध्ये खूप असमानपणे विभाजित केले जाते; उच्च-आवाजातील सबग्राफ एका शार्डमध्ये आणि इतर सर्व गोष्टी दुसर्‍यामध्ये ठेवल्या गेल्यास अशा परिस्थितीत ते नाटकीयरित्या मदत करू शकते कारण त्या सेटअपमुळे उच्च-व्हॉल्यूम सबग्राफचा डेटा db-अंतर्गत कॅशेमध्ये राहण्याची अधिक शक्यता असते. कमी-व्हॉल्यूम सबग्राफमधून आवश्यक नसलेल्या डेटाद्वारे बदला. +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. -कनेक्शन कॉन्फिगर करण्याच्या दृष्टीने, postgresql.conf मधील max_connections ने 400 (किंवा कदाचित 200 देखील) सेट करून प्रारंभ करा आणि store_connection_wait_time_ms आणि store_connection_checkout_count Prometheus मेट्रिक्स पहा. लक्षात येण्याजोग्या प्रतीक्षा वेळा (5ms वरील काहीही) हे एक संकेत आहे की तेथे खूप कमी कनेक्शन उपलब्ध आहेत; डेटाबेस खूप व्यस्त असल्यामुळे (जसे की उच्च CPU लोड) जास्त प्रतीक्षा वेळ देखील असेल. तथापि, डेटाबेस अन्यथा स्थिर वाटत असल्यास, उच्च प्रतीक्षा वेळ कनेक्शनची संख्या वाढवण्याची आवश्यकता दर्शवते. 
कॉन्फिगरेशनमध्ये, प्रत्येक ग्राफ-नोड उदाहरणे किती कनेक्शन वापरू शकतात ही एक वरची मर्यादा आहे आणि ग्राफ नोडला आवश्यक नसल्यास कनेक्शन उघडे ठेवणार नाही. +In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. -स्टोअर कॉन्फिगरेशनबद्दल [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases) अधिक वाचा. +Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). #### समर्पित ब्लॉक अंतर्ग्रहण -जर अनेक नोड्स कॉन्फिगर केले असतील तर, नवीन ब्लॉक्सच्या अंतर्ग्रहणासाठी जबाबदार असणारा एक नोड निर्दिष्ट करणे आवश्यक असेल, जेणेकरून सर्व कॉन्फिगर केलेले इंडेक्स नोड्स चेन हेडवर मतदान करत नाहीत. हे `साखळी` नेमस्पेसचा भाग म्हणून केले जाते, ब्लॉक अंतर्ग्रहणासाठी वापरले जाणारे `node_id` निर्दिष्ट करते: +If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### एकाधिक नेटवर्क समर्थन -आलेख प्रोटोकॉल इंडेक्सिंग रिवॉर्ड्ससाठी समर्थित नेटवर्क्सची संख्या वाढवत आहे आणि असे अनेक सबग्राफ आहेत जे असमर्थित नेटवर्क अनुक्रमित करतात ज्यावर इंडेक्सर प्रक्रिया करू इच्छितो. `config.toml` फाइल अर्थपूर्ण आणि लवचिक कॉन्फिगरेशनसाठी परवानगी देते: +The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: - एकाधिक नेटवर्क -- प्रति नेटवर्क एकाधिक प्रदाते (हे प्रदात्यांमध्ये लोडचे विभाजन करण्यास अनुमती देऊ शकते आणि पूर्ण नोड्स तसेच संग्रहित नोड्सच्या कॉन्फिगरेशनला देखील अनुमती देऊ शकते, दिलेल्या वर्कलोडला परवानगी दिल्यास ग्राफ नोड स्वस्त प्रदात्यांकडे प्राधान्य देतो). -- अतिरिक्त प्रदाता तपशील, जसे की वैशिष्ट्ये, प्रमाणीकरण आणि प्रदात्याचा प्रकार (प्रायोगिक फायरहोस समर्थनासाठी) +- Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). +- Additional provider details, such as features, authentication and the type of provider (for experimental Firehose support) -`[chains]` विभाग इथेरियम प्रदाते नियंत्रित करतो ज्यांना ग्राफ-नोड कनेक्ट केले जाते आणि जेथे प्रत्येक साखळीसाठी ब्लॉक आणि इतर मेटाडेटा संग्रहित केला जातो. खालील उदाहरण दोन चेन कॉन्फिगर करते, मेननेट आणि कोव्हन, जिथे मेननेटचे ब्लॉक्स vip शार्डमध्ये साठवले जातात आणि कोवनचे ब्लॉक्स प्राथमिक शार्डमध्ये साठवले जातात. मेननेट चेन दोन भिन्न प्रदाते वापरू शकते, तर कोव्हनमध्ये फक्त एक प्रदाता आहे. 
+The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. ```toml [chains] @@ -210,64 +210,64 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -प्रदाता कॉन्फिगरेशनबद्दल [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers) अधिक वाचा. +Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). ### पर्यावरण परिवर्तने -ग्राफ नोड पर्यावरणीय चलांच्या श्रेणीचे समर्थन करते जे वैशिष्ट्ये सक्षम करू शकतात किंवा ग्राफ नोड वर्तन बदलू शकतात. हे [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) दस्तऐवजीकरण केलेले आहेत. +Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). ### सतत तैनाती -प्रगत कॉन्फिगरेशनसह स्केल केलेले इंडेक्सिंग सेटअप चालवणारे वापरकर्ते त्यांचे ग्राफ नोड्स Kubernetes सोबत व्यवस्थापित करण्याचा फायदा घेऊ शकतात. +Users who are operating a scaled indexing setup with advanced configuration may benefit from managing their Graph Nodes with Kubernetes. - The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) हे GraphOps द्वारे देखरेख केलेल्या Kubernetes वर ग्राफ प्रोटोकॉल इंडेक्सर चालवण्यासाठी टूलकिट आहे. हे ग्राफ नोड उपयोजन व्यवस्थापित करण्यासाठी हेल्म चार्ट आणि एक CLI प्रदान करते. +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. ### ग्राफ नोडचे व्यवस्थापन -चालू असलेला आलेख नोड (किंवा आलेख नोड्स!) दिल्यास, त्या नोड्सवर उपयोजित सबग्राफ व्यवस्थापित करण्याचे आव्हान आहे. उपग्राफ व्यवस्थापित करण्यात मदत करण्यासाठी ग्राफ नोड अनेक साधनांची श्रेणी तयार करतो. +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. #### लॉगिंग -ग्राफ नोडचे लॉग ग्राफ नोड आणि विशिष्ट सबग्राफच्या डीबगिंग आणि ऑप्टिमायझेशनसाठी उपयुक्त माहिती प्रदान करू शकतात. ग्राफ नोड `GRAPH_LOG` पर्यावरण व्हेरिएबलद्वारे विविध लॉग स्तरांना खालील स्तरांसह समर्थन देतो: त्रुटी, चेतावणी, माहिती, डीबग किंवा ट्रेस. +Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. -याव्यतिरिक्त `gql` वर `GRAPH_LOG_QUERY_TIMING` सेटिंग केल्याने GraphQL क्वेरी कशा चालत आहेत याबद्दल अधिक तपशील प्रदान करते (जरी हे मोठ्या प्रमाणात लॉग तयार करेल). +In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). 
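As a rough illustration of these logging options (the environment variable names are the real ones described above; the launch command mirrors the standard source-build invocation in these docs, and all values are placeholders):

```sh
# Example only: verbose logs plus GraphQL query timing details
export GRAPH_LOG=debug
export GRAPH_LOG_QUERY_TIMING=gql
cargo run -p graph-node --release -- \
  --postgres-url postgresql://USERNAME[:PASSWORD]@localhost:5432/graph-node \
  --ethereum-rpc NETWORK_NAME:[CAPABILITIES]:URL \
  --ipfs https://ipfs.network.thegraph.com
```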
-#### देखरेख & चेतावणी
+#### Monitoring & alerting

-ग्राफ नोड डिफॉल्टनुसार 8040 पोर्टवर प्रोमिथियस एंडपॉइंटद्वारे मेट्रिक्स प्रदान करतो. या मेट्रिक्सची कल्पना करण्यासाठी ग्राफानाचा वापर केला जाऊ शकतो.
+Graph Node provides metrics via a Prometheus endpoint on port 8040 by default. Grafana can then be used to visualise these metrics.

The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml).

#### ग्राफमन

-`ग्राफमन` हे ग्राफ नोडसाठी एक देखभाल साधन आहे, जे वेगवेगळ्या दैनंदिन आणि अपवादात्मक कार्यांचे निदान आणि निराकरण करण्यात मदत करते.
+`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks.

-ग्राफमन कमांड अधिकृत कंटेनरमध्ये समाविष्ट केली आहे आणि ती चालवण्यासाठी तुम्ही तुमच्या ग्राफ-नोड कंटेनरमध्ये डॉकर एक्झी करू शकता. यासाठी `config.toml` फाइल आवश्यक आहे.
+The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file.

-`graphman` आदेशांचे संपूर्ण दस्तऐवजीकरण ग्राफ नोड भांडारात उपलब्ध आहे. ग्राफ नोड `/docs` मध्ये \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) पहा
+Full documentation of `graphman` commands is available in the Graph Node repository. See [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` directory.

### सबग्राफसह कार्य करणे

#### अनुक्रमणिका स्थिती API

-डीफॉल्टनुसार पोर्ट 8030/graphql वर उपलब्ध, अनुक्रमणिका स्थिती API वेगवेगळ्या सबग्राफसाठी अनुक्रमणिका स्थिती तपासण्यासाठी, अनुक्रमणिकेचे पुरावे तपासण्यासाठी, सबग्राफ वैशिष्ट्यांचे निरीक्षण करण्यासाठी आणि बरेच काही करण्यासाठी पद्धतींची श्रेणी उघड करते.
+Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more.

-संपूर्ण स्कीमा [येथे](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) उपलब्ध आहे.
+The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql).

#### अनुक्रमणिका कार्यप्रदर्शन

There are three separate parts of the indexing process:

- प्रदात्याकडून स्वारस्यपूर्ण इव्हेंट आणत आहे
- योग्य हँडलर्ससह इव्हेंट्सवर प्रक्रिया करणे (यामध्ये राज्यासाठी साखळी कॉल करणे आणि स्टोअरमधून डेटा आणणे समाविष्ट असू शकते)
- परिणामी डेटा स्टोअरमध्ये लिहित आहे

-हे टप्पे पाइपलाइन केलेले आहेत (म्हणजे ते समांतरपणे कार्यान्वित केले जाऊ शकतात), परंतु ते एकमेकांवर अवलंबून आहेत. जेथे सबग्राफ अनुक्रमणिकेसाठी मंद असतात, तेथे मूळ कारण विशिष्ट सबग्राफवर अवलंबून असेल.
+These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. 
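One quick way to see how far a given subgraph has progressed through these stages is the `_meta` field exposed on every subgraph's GraphQL endpoint; the subgraph name in the endpoint path below is a placeholder:

```graphql
# Query http://localhost:8000/subgraphs/name/<subgraph-name> (placeholder name)
{
  _meta {
    block {
      number
      hash
    }
    hasIndexingErrors
  }
}
```

Comparing `block.number` against the chain head over time shows whether indexing is keeping up or falling behind.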
अनुक्रमणिका मंद होण्याची सामान्य कारणे: -- साखळीतून संबंधित इव्हेंट शोधण्यासाठी लागणारा वेळ (`trace_filter` वर अवलंबून राहिल्यामुळे कॉल हँडलर धीमे असू शकतात) +- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) - Making large numbers of `eth_calls` as part of handlers - A large amount of store interaction during execution - A large amount of data to save to the store @@ -276,35 +276,35 @@ There are three separate parts of the indexing process: - प्रदाता स्वतः साखळी डोके मागे घसरण - Slowness in fetching new receipts at the chain head from the provider -सबग्राफ इंडेक्सिंग मेट्रिक्स इंडेक्सिंग मंदतेच्या मूळ कारणाचे निदान करण्यात मदत करू शकतात. काही प्रकरणांमध्ये, समस्या उपग्राफमध्येच असते, परंतु इतरांमध्ये, सुधारित नेटवर्क प्रदाते, कमी डेटाबेस विवाद आणि इतर कॉन्फिगरेशन सुधारणा अनुक्रमणिका कार्यप्रदर्शनात लक्षणीय सुधारणा करू शकतात. +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. #### अयशस्वी सबग्राफ -अनुक्रमणिका दरम्यान सबग्राफ अयशस्वी होऊ शकतात, त्यांना अनपेक्षित डेटा आढळल्यास, काही घटक अपेक्षेप्रमाणे कार्य करत नसल्यास किंवा इव्हेंट हँडलर किंवा कॉन्फिगरेशनमध्ये काही बग असल्यास. अपयशाचे दोन सामान्य प्रकार आहेत: +During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: - Deterministic failures: these are failures which will not be resolved with retries -- नॉन-डिटरमिनिस्टिक अपयश: हे प्रदात्याशी संबंधित समस्या किंवा काही अनपेक्षित ग्राफ नोड त्रुटींमुळे असू शकतात. जेव्हा नॉन-डिटरमिनिस्टिक अपयश येते, तेव्हा ग्राफ नोड अयशस्वी हँडलरचा पुन्हा प्रयत्न करेल, कालांतराने बॅक ऑफ होईल. +- Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. -काही प्रकरणांमध्ये इंडेक्सरद्वारे बिघाडाचे निराकरण केले जाऊ शकते (उदाहरणार्थ त्रुटी योग्य प्रकारचा प्रदाता नसल्यामुळे, आवश्यक प्रदाता जोडल्याने अनुक्रमणिका सुरू ठेवता येईल). तथापि, इतरांमध्ये, सबग्राफ कोडमध्ये बदल आवश्यक आहे. +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. -> निर्धारक अपयशांना "अंतिम" मानले जाते, अयशस्वी ब्लॉकसाठी अनुक्रमणिकेचा पुरावा व्युत्पन्न केला जातो, तर नॉन-डिटरमिनिस्टिक अपयश असे नसतात, कारण सबग्राफ "अनफल" आणि अनुक्रमणिका सुरू ठेवू शकतो. काही प्रकरणांमध्ये, नॉन-डिटरमिनिस्टिक लेबल चुकीचे आहे, आणि सबग्राफ कधीही त्रुटीवर मात करणार नाही; अशा अपयशांचा ग्राफ नोड रेपॉजिटरीवरील समस्या म्हणून अहवाल द्यावा. +> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-deterministic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. #### ब्लॉक आणि कॉल कॅशे -प्रदात्याकडून रीफेचिंग जतन करण्यासाठी ग्राफ नोड स्टोअरमधील काही डेटा कॅश करतो. 
`eth_calls` च्या परिणामांप्रमाणेच ब्लॉक्स कॅशे केले जातात (नंतरचे विशिष्ट ब्लॉक म्हणून कॅश केले जातात). हे कॅशिंग थोड्याशा बदललेल्या सबग्राफच्या "रीसिंकिंग" दरम्यान अनुक्रमणिकेची गती नाटकीयरित्या वाढवू शकते. +Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. If a block cache inconsistency is suspected, such as a tx receipt missing event: 1. `graphman chain list` to find the chain name. -2. `ग्राफमन चेन चेक-ब्लॉक बाय-नंबर ` कॅशे केलेला ब्लॉक प्रदात्याशी जुळतो की नाही हे तपासेल आणि तसे नसल्यास कॅशेमधून ब्लॉक हटवेल. +2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. 2. If the block matches the provider, then the issue can be debugged directly against the provider. #### समस्या आणि त्रुटींची चौकशी करणे -एकदा सबग्राफ इंडेक्स केला गेला की, इंडेक्सर्स सबग्राफच्या समर्पित क्वेरी एंडपॉइंटद्वारे क्वेरी सर्व्ह करण्याची अपेक्षा करू शकतात. जर इंडेक्सर महत्त्वपूर्ण क्वेरी व्हॉल्यूम प्रदान करण्याची आशा करत असेल तर, समर्पित क्वेरी नोडची शिफारस केली जाते आणि खूप जास्त क्वेरी व्हॉल्यूम असल्यास, इंडेक्सर्स प्रतिकृती शार्ड्स कॉन्फिगर करू शकतात जेणेकरुन क्वेरींचा अनुक्रमणिका प्रक्रियेवर परिणाम होणार नाही. +Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. @@ -312,34 +312,34 @@ There is not one "silver bullet", but a range of tools for preventing, diagnosin ##### क्वेरी कॅशिंग -ग्राफ नोड डीफॉल्टनुसार GraphQL क्वेरी कॅश करते, जे डेटाबेस लोड लक्षणीयरीत्या कमी करू शकते. हे पुढे `GRAPH_QUERY_CACHE_BLOCKS` आणि `GRAPH_QUERY_CACHE_MAX_MEM` सेटिंग्जसह कॉन्फिगर केले जाऊ शकते - [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching) अधिक वाचा. +Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). ##### प्रश्नांचे विश्लेषण करत आहे -समस्याप्रधान क्वेरी बहुतेक वेळा दोनपैकी एका मार्गाने समोर येतात. काही प्रकरणांमध्ये, वापरकर्ते स्वतः तक्रार करतात की दिलेली क्वेरी धीमी आहे. अशावेळी मंदपणाच्या कारणाचे निदान करणे हे आव्हान असते - मग ती सामान्य समस्या असो, किंवा त्या सबग्राफ किंवा क्वेरीशी संबंधित असो. 
आणि मग नक्कीच, शक्य असल्यास, निराकरण करण्यासाठी. +Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. -इतर प्रकरणांमध्ये, ट्रिगर हा क्वेरी नोडवर उच्च मेमरी वापर असू शकतो, अशा परिस्थितीत समस्या निर्माण करणारी क्वेरी ओळखण्याचे आव्हान प्रथम आहे. +In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. -इंडेक्सर्स ग्राफ नोडच्या क्वेरी लॉगची प्रक्रिया आणि सारांश देण्यासाठी [qlog](https://github.com/graphprotocol/qlog/) वापरू शकतात. `GRAPH_LOG_QUERY_TIMING` मंद क्वेरी ओळखण्यात आणि डीबग करण्यात मदत करण्यासाठी देखील सक्षम केले जाऊ शकते. +Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. -धीमे क्वेरी दिल्यास, इंडेक्सर्सकडे काही पर्याय आहेत. समस्याप्रधान क्वेरी पाठविण्याच्या खर्चात लक्षणीय वाढ करण्यासाठी अर्थातच ते त्यांच्या किंमतीचे मॉडेल बदलू शकतात. यामुळे त्या क्वेरीची वारंवारता कमी होऊ शकते. तथापि, हे सहसा समस्येचे मूळ कारण सोडवत नाही. +Given a slow query, indexers have a few options. Of course they can alter their cost model, to significantly increase the cost of sending the problematic query. This may result in a reduction in the frequency of that query. However this often doesn't resolve the root cause of the issue. ##### खात्यासारखे ऑप्टिमायझेशन -डाटाबेस सारणी जे संस्था संग्रहित करतात ते साधारणपणे दोन प्रकारात येतात: 'व्यवहारासारखे', जेथे संस्था, एकदा तयार केल्या गेल्या, कधीही अद्यतनित केल्या जात नाहीत, म्हणजे, ते आर्थिक व्यवहारांच्या सूचीसारखे काहीतरी संग्रहित करतात आणि 'खाते-सारखे' जेथे संस्था बर्‍याचदा अपडेट केले जातात, म्हणजे, ते आर्थिक खात्यांसारखे काहीतरी संग्रहित करतात जे प्रत्येक वेळी व्यवहार रेकॉर्ड केल्यावर सुधारित केले जातात. खात्यासारख्या सारण्यांचे वैशिष्ट्य असे आहे की त्यामध्ये मोठ्या संख्येने अस्तित्व आवृत्त्या आहेत, परंतु तुलनेने काही वेगळे घटक आहेत. बर्‍याचदा, अशा सारण्यांमध्ये भिन्न घटकांची संख्या एकूण पंक्तींच्या 1% असते (संस्था आवृत्ती) +Database tables that store entities seem to generally come in two varieties: 'transaction-like', where entities, once created, are never updated, i.e., they store something akin to a list of financial transactions, and 'account-like' where entities are updated very often, i.e., they store something like financial accounts that get modified every time a transaction is recorded. Account-like tables are characterized by the fact that they contain a large number of entity versions, but relatively few distinct entities. Often, in such tables the number of distinct entities is 1% of the total number of rows (entity versions) -खात्यासारख्या सारण्यांसाठी, `ग्राफ-नोड` क्वेरी व्युत्पन्न करू शकतात जे पोस्टग्रेस एवढ्या उच्च बदलासह डेटा कसा संग्रहित करते याच्या तपशीलाचा फायदा घेतात, म्हणजे अलीकडील ब्लॉक्ससाठी सर्व आवृत्त्या आहेत. अशा सारणीसाठी एकूण स्टोरेजचा एक छोटा उपविभाग. +For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. 
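To make that ratio concrete, it can be spot-checked directly in Postgres. This is a sketch only: `sgd1` stands in for a real deployment namespace (the `sgdNNN` schemas mentioned later in this document) and `pair` for a real entity table:

```sh
# Hypothetical schema (sgd1) and entity table (pair); entity tables carry an "id" column
psql -d graph-node -c \
  "SELECT count(*) AS entity_versions, count(DISTINCT id) AS distinct_entities FROM sgd1.pair;"
```

If `distinct_entities` comes out below roughly 1% of `entity_versions`, the table fits the account-like profile described above.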
-कमांड `ग्राफमन आकडेवारी दाखवते` डिप्लॉयमेंटमधील प्रत्येक घटक प्रकार/सारणीसाठी, किती वेगळे घटक आणि प्रत्येक सारणीमध्ये किती घटक आवृत्त्या आहेत हे दाखवते. तो डेटा पोस्टग्रेस-अंतर्गत अंदाजांवर आधारित आहे, आणि त्यामुळे अपरिहार्यपणे अशुद्ध आहे, आणि परिमाणाच्या क्रमाने बंद होऊ शकतो. `संस्था` स्तंभातील `-1` म्हणजे पोस्टग्रेसचा असा विश्वास आहे की सर्व पंक्तींमध्ये एक वेगळे अस्तित्व आहे.
+The command `graphman stats show` shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity.

-सर्वसाधारणपणे, ज्या सारण्यांमध्ये भिन्न घटकांची संख्या एकूण पंक्ती/संस्थेच्या आवृत्त्यांच्या 1% पेक्षा कमी आहे ते खाते-समान ऑप्टिमायझेशनसाठी चांगले उमेदवार आहेत. जेव्हा `ग्राफमन आकडेवारी दर्शविते` चे आउटपुट सूचित करते की टेबलला या ऑप्टिमायझेशनचा फायदा होऊ शकतो, तेव्हा `ग्राफमन आकडेवारी <table>` सारणीची संपूर्ण गणना करेल - ते धीमे असू शकते, परंतु एकूण घटक आवृत्त्यांमधील भिन्न घटकांच्या गुणोत्तराचे अचूक माप देते.
+In general, tables where the number of distinct entities is less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show <table>` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions.

-एकदा टेबल खात्यासारखे असल्याचे निश्चित केले गेले की, `ग्राफमन स्टॅट्स अकाउंट-समान <sgdNNN>.<table>` चालवल्याने त्या टेबलवरील क्वेरींसाठी खाते-सारखे ऑप्टिमायझेशन चालू होईल. ऑप्टिमायझेशन `graphman stats account-like --clear <sgdNNN>.<table>` सह पुन्हा बंद केले जाऊ शकते. ऑप्टिमायझेशन चालू किंवा बंद केले आहे हे लक्षात येण्यासाठी क्वेरी नोड्सना 5 मिनिटांपर्यंत लागू शकतात. ऑप्टिमायझेशन चालू केल्यानंतर, बदलामुळे त्या टेबलसाठी प्रश्नांची गती कमी होत नाही हे सत्यापित करणे आवश्यक आहे. तुम्ही पोस्टग्रेसचे निरीक्षण करण्यासाठी Grafana कॉन्फिगर केले असल्यास, स्लो क्वेरी `pg_stat_activity` मध्‍ये मोठ्या संख्येने दिसतील, काही सेकंद घेतील. अशा परिस्थितीत, ऑप्टिमायझेशन पुन्हा बंद करणे आवश्यक आहे.
+Once a table has been determined to be account-like, running `graphman stats account-like <sgdNNN>.<table>` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <sgdNNN>.<table>`. It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity` in large numbers, taking several seconds. In that case, the optimization needs to be turned off again.

-युनिस्‍ॅप सारख्या सबग्राफसाठी, `जोडी` आणि `टोकन` सारण्या या ऑप्टिमायझेशनसाठी प्रमुख उमेदवार आहेत आणि डेटाबेस लोडवर नाटकीय प्रभाव टाकू शकतात.
+For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load.

#### सबग्राफ काढून टाकत आहे

> This is new functionality, which will be available in Graph Node 0.29.x

-काही ठिकाणी इंडेक्सरला दिलेला सबग्राफ काढायचा असेल. हे `ग्राफमॅन ड्रॉप` द्वारे सहजपणे केले जाऊ शकते, जे उपयोजन आणि सर्व अनुक्रमित डेटा हटवते. उपयोजन एकतर सबग्राफ नाव, IPFS हॅश `Qm..` किंवा डेटाबेस नेमस्पेस `sgdNNN` म्हणून निर्दिष्ट केले जाऊ शकते. पुढील दस्तऐवजीकरण [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) उपलब्ध आहे.
+At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all its indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop).

From 39ff7276606adb8ec25d7dd2982061aa07c65535 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= 
Date: Fri, 14 Feb 2025 12:45:43 -0500
Subject: [PATCH 0131/1534] New translations graph-node.mdx (Hindi)

---
 .../pages/hi/indexing/tooling/graph-node.mdx | 224 +++++++++---------
 1 file changed, 112 insertions(+), 112 deletions(-)

diff --git a/website/src/pages/hi/indexing/tooling/graph-node.mdx b/website/src/pages/hi/indexing/tooling/graph-node.mdx
index d8707dae894c..495131fcd266 100644
--- a/website/src/pages/hi/indexing/tooling/graph-node.mdx
+++ b/website/src/pages/hi/indexing/tooling/graph-node.mdx
@@ -1,38 +1,38 @@
---
-title: ग्राफ नोड का परिचालन
+title: ग्राफ-नोड
---

-ग्राफ़ नोड वह घटक है जो उप-अनुच्छेदों को अनुक्रमित करता है, और परिणामी डेटा को ग्राफ़िकल एपीआई के माध्यम से क्वेरी के लिए उपलब्ध कराता है। इस तरह यह इंडेक्सर स्टैक के लिए केंद्रीय है, और एक सफल इंडेक्सर चलाने के लिए ग्राफ नोड का सही संचालन महत्वपूर्ण है।
+Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. 
-यह ग्राफ़ नोड का एक प्रासंगिक अवलोकन प्रदान करता है, और इंडेक्सर्स के लिए उपलब्ध कुछ और उन्नत विकल्प प्रदान करता है। विस्तृत दस्तावेज़ और निर्देश [ग्राफ़ नोड रिपॉजिटरी](https://github.com/graphprotocol/graph-node) में देखे जा सकते हैं। +ग्राफ-नोड का संदर्भ और indexers के लिए उपलब्ध कुछ उन्नत विकल्पों का परिचय प्रदान करता है। विस्तृत दस्तावेज़ और निर्देश [Graph Node repository](https://github.com/graphprotocol/graph-node) में पाए जा सकते हैं। -## Graph Node +## ग्राफ-नोड -[ग्राफ़ नोड](https://github.com/graphprotocol/graph-node) ग्राफ़ नेटवर्क पर सबग्राफ़ को इंडेक्स करने, ब्लॉकचेन क्लाइंट से कनेक्ट करने, सबग्राफ़ को इंडेक्स करने और इंडेक्स किए गए डेटा को उपलब्ध कराने के लिए संदर्भ कार्यान्वयन है पूछताछ करने के लिए। +[Graph Node](https://github.com/graphprotocol/graph-node) The Graph Network पर सबग्राफ को indexing करने के लिए रेफरेंस इंप्लीमेंटेशन है, जो ब्लॉकचेन क्लाइंट्स से जुड़ता है, सबग्राफ को indexing करता है और इंडेक्स किए गए डेटा को queries के लिए उपलब्ध कराता है। -ग्राफ़ नोड (और संपूर्ण इंडेक्सर स्टैक) को नंगे धातु, या क्लाउड वातावरण में चलाया जा सकता है। सेंट्रल इंडेक्सिंग कंपोनेंट का यह लचीलापन द ग्राफ प्रोटोकॉल की मजबूती के लिए महत्वपूर्ण है। इसी तरह, ग्राफ़ नोड [स्रोत से निर्मित](https://github.com/graphprotocol/graph-node) हो सकता है, या इंडेक्सर [डॉकर छवियां प्रदान करता है](https:// में से किसी एक का उपयोग कर सकते हैं। hub.docker.com/r/graphprotocol/graph-node)। +Graph Node (और पूरा indexer stack) को bare metal पर या एक cloud environment में चलाया जा सकता है। The Graph Protocol की मजबूती के लिए केंद्रीय indexing घटक की यह लचीलापन बहुत महत्वपूर्ण है। इसी तरह, ग्राफ-नोड को [साधन से बनाया जा सकता](https://github.com/graphprotocol/graph-node) है, या indexers [प्रदत्त Docker Images](https://hub.docker.com/r/graphprotocol/graph-node) में से एक का उपयोग कर सकते हैं। ### पोस्टग्रेएसक्यूएल डेटाबेस -ग्राफ नोड के लिए मुख्य स्टोर, यह वह जगह है जहां सबग्राफ डेटा संग्रहीत किया जाता है, साथ ही सबग्राफ के बारे में मेटाडेटा, और सबग्राफ-एग्नोस्टिक नेटवर्क डेटा जैसे ब्लॉक कैश, और eth_call कैश। +The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. 
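Because subgraph data, metadata and the caches all accumulate in this one database, its growth is worth keeping an eye on; as a small illustration (assuming the database is named `graph-node`, matching the setup steps below):

```sh
# Standard PostgreSQL tooling; "graph-node" is the database name used in the setup section
psql -d graph-node -c "SELECT pg_size_pretty(pg_database_size('graph-node'));"
```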
### नेटवर्क क्लाइंट किसी नेटवर्क को इंडेक्स करने के लिए, ग्राफ़ नोड को एथेरियम-संगत JSON-RPC के माध्यम से नेटवर्क क्लाइंट तक पहुंच की आवश्यकता होती है। यह आरपीसी एक एथेरियम क्लाइंट से जुड़ सकता है या यह एक अधिक जटिल सेटअप हो सकता है जो कई में संतुलन लोड करता है। -जबकि कुछ सबग्राफ को केवल एक पूर्ण एथेरियम नोड की आवश्यकता हो सकती है, कुछ में इंडेक्सिंग सुविधाएं हो सकती हैं जिनके लिए अतिरिक्त आरपीसी कार्यक्षमता की आवश्यकता होती है। विशेष रूप से सबग्राफ जो इंडेक्सिंग के हिस्से के रूप में `eth_calls` बनाते हैं, उन्हें एक आर्काइव नोड की आवश्यकता होगी जो [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) को सपोर्ट करता हो।, और `callHandlers` वाले सबग्राफ, या `call` फ़िल्टर वाले `blockHandlers`, `trace_filter` समर्थन की आवश्यकता होती है ([ट्रेस मॉड्यूल दस्तावेज़ यहां देखें](https://openethereum.github.io/JSONRPC-trace-module))। +कुछ सबग्राफ को केवल एक पूर्ण नोड की आवश्यकता हो सकती है, लेकिन कुछ में indexing फीचर्स होते हैं, जिनके लिए अतिरिक्त RPC कार्यक्षमता की आवश्यकता होती है। विशेष रूप से, ऐसे सबग्राफ जो indexing के हिस्से के रूप में `eth_calls` करते हैं, उन्हें एक आर्काइव नोड की आवश्यकता होगी जो [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) को सपोर्ट करता हो। साथ ही, ऐसे सबग्राफ जिनमें `callHandlers` या `blockHandlers` के साथ एक `call` फ़िल्टर हो, उन्हें `trace_filter` सपोर्ट की आवश्यकता होती है ([trace module documentation यहां देखें](https://openethereum.github.io/JSONRPC-trace-module))। -**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**नेटवर्क फायरहोज़** - फायरहोज़ एक gRPC सेवा है जो ब्लॉक्स का क्रमबद्ध, फिर भी फोर्क-अवेयर स्ट्रीम प्रदान करती है। इसे The Graph के कोर डेवलपर्स द्वारा बड़े पैमाने पर प्रभावी indexing का समर्थन करने के लिए विकसित किया गया है। यह वर्तमान में Indexer के लिए अनिवार्य नहीं है, लेकिन Indexers को इस तकनीक से परिचित होने के लिए प्रोत्साहित किया जाता है ताकि वे नेटवर्क के पूर्ण समर्थन के लिए तैयार रहें। फायरहोज़ के बारे में अधिक जानें [यहां](https://firehose.streamingfast.io/)। ### आईपीएफएस नोड्स -सबग्राफ परिनियोजन मेटाडेटा IPFS नेटवर्क पर संग्रहीत है। सबग्राफ मैनिफ़ेस्ट और सभी लिंक की गई फ़ाइलों को लाने के लिए सबग्राफ़ परिनियोजन के दौरान ग्राफ़ नोड मुख्य रूप से IPFS नोड तक पहुँचता है। नेटवर्क इंडेक्सर्स को अपने स्वयं के IPFS नोड को होस्ट करने की आवश्यकता नहीं है। नेटवर्क के लिए IPFS नोड https://ipfs.network.thegraph.com पर होस्ट किया गया है। +Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. ### प्रोमेथियस मेट्रिक्स सर्वर -निगरानी और रिपोर्टिंग को सक्षम करने के लिए, ग्राफ़ नोड वैकल्पिक रूप से मेट्रिक्स को प्रोमेथियस मेट्रिक्स सर्वर पर लॉग कर सकता है। +To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. 
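A quick way to confirm the metrics endpoint is live (port 8040 is the default, as listed in the Ports table below; localhost is an assumption):

```sh
# Print the first Prometheus metric lines exposed by a local Graph Node
curl -s http://localhost:8040/metrics | head -n 20
```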
-### स्रोत से प्रारंभ करना +### Getting started from source #### Install prerequisites @@ -42,7 +42,7 @@ title: ग्राफ नोड का परिचालन - **IPFS** -- **उबंटू उपयोगकर्ताओं के लिए अतिरिक्त आवश्यकताएं** - उबंटू पर ग्राफ नोड चलाने के लिए कुछ अतिरिक्त पैकेजों की आवश्यकता हो सकती है। +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpq-dev libssl-dev pkg-config @@ -50,7 +50,7 @@ sudo apt-get install -y clang libpq-dev libssl-dev pkg-config #### Setup -1. एक PostgreSQL डेटाबेस सर्वर प्रारंभ करें +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. [ग्राफ़ नोड](https://github.com/graphprotocol/graph-node) रेपो क्लोन करें और `cargo build` चलाकर स्रोत बनाएं +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. अब जब सभी निर्भरताएँ स्थापित हो गई हैं, तो ग्राफ़ नोड प्रारंभ करें: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -69,37 +69,37 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -### कुबेरनेट्स के साथ शुरुआत करना +### Getting started with Kubernetes -एक पूर्ण कुबेरनेट्स उदाहरण विन्यास [इंडेक्सर रिपॉजिटरी](https://github.com/graphprotocol/indexer/tree/main/k8s) में पाया जा सकता है। +Kubernetes का एक पूर्ण उदाहरण कॉन्फ़िगरेशन [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s) में पाया जा सकता है। ### Ports -जब यह चल रहा होता है तो ग्राफ़ नोड निम्नलिखित पोर्ट को उजागर करता है: +When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (सबग्राफ प्रश्नों के लिए) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (सबग्राफ सब्सक्रिप्शन के लिए) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (तैनाती के प्रबंधन के लिए) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| Port | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------------------------------- | ---------------------------------------------- | ------------------ | -------------------- | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | \--http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | \--ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | \--admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | \--index-node-port | - | +| 8040 | Prometheus metrics | /metrics | \--metrics-port | - | -> **महत्वपूर्ण**: बंदरगाहों को सार्वजनिक रूप से उजागर करने के बारे में सावधान रहें - **प्रशासन बंदरगाहों** को बंद रखा जाना चाहिए। इसमें ग्राफ़ नोड JSON-RPC समापन बिंदु शामिल है। +> **प्रमुख बात**: सार्वजनिक रूप से पोर्ट्स को एक्सपोज़ करने में सावधानी बरतें - \*\*प्रशासनिक पोर्ट्स को लॉक रखना चाहिए। इसमें ग्राफ नोड JSON-RPC एंडपॉइंट भी शामिल है। -## उन्नत ग्राफ़ नोड कॉन्फ़िगरेशन +## Advanced Graph Node configuration -अपने सरलतम रूप में, ग्राफ़ नोड को ग्राफ़ नोड, एक एकल PostgreSQL डेटाबेस, एक IPFS नोड, और नेटवर्क क्लाइंट के रूप में संचालित किया जा सकता है, जैसा कि उप-अनुच्छेदों द्वारा अनुक्रमित किया जाना आवश्यक है। +At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. -उन ग्राफ़ नोड्स का समर्थन करने के लिए कई ग्राफ़ नोड्स और कई डेटाबेस जोड़कर इस सेटअप को क्षैतिज रूप से बढ़ाया जा सकता है। उन्नत उपयोगकर्ता `config.toml` फ़ाइल और ग्राफ़ नोड के पर्यावरण चर के माध्यम से ग्राफ़ नोड की कुछ क्षैतिज स्केलिंग क्षमताओं के साथ-साथ कुछ अधिक उन्नत कॉन्फ़िगरेशन विकल्पों का लाभ उठाना चाह सकते हैं। +इस सेटअप को क्षैतिज रूप से स्केल किया जा सकता है, कई Graph नोड और उन Graph नोड को समर्थन देने के लिए कई डेटाबेस जोड़कर। उन्नत उपयोगकर्ता ग्राफ-नोड की कुछ क्षैतिज स्केलिंग क्षमताओं का लाभ उठाना चाह सकते हैं, साथ ही कुछ अधिक उन्नत कॉन्फ़िगरेशन विकल्पों का भी, `config.toml` फ़ाइल और ग्राफ-नोड के पर्यावरण वेरिएबल्स के माध्यम से। ### `config.toml` -एक [TOML](https://toml.io/en/) कॉन्फ़िगरेशन फ़ाइल का उपयोग CLI में प्रदर्शित कॉन्फ़िगरेशन की तुलना में अधिक जटिल कॉन्फ़िगरेशन सेट करने के लिए किया जा सकता है। फ़ाइल का स्थान --config कमांड लाइन स्विच के साथ दिया जाता है। +A [TOML](https://toml.io/en/) कॉन्फ़िगरेशन फ़ाइल का उपयोग CLI में उजागर किए गए अधिक जटिल कॉन्फ़िगरेशन सेट करने के लिए किया जा सकता है। फ़ाइल का स्थान --config कमांड लाइन स्विच के साथ पास किया जाता है। > कॉन्फ़िगरेशन फ़ाइल का उपयोग करते समय, --postgres-url, --postgres-secondary-hosts, और --postgres-host-weights विकल्पों का उपयोग करना संभव नहीं है। -एक न्यूनतम `config.toml` फ़ाइल प्रदान की जा सकती है; निम्न फ़ाइल --postgres-url कमांड लाइन विकल्प का उपयोग करने के बराबर है: +एक न्यूनतम `config.toml` फ़ाइल प्रदान की जा सकती है; निम्नलिखित फ़ाइल का उपयोग --postgres-url कमांड लाइन विकल्प के समान है: ```toml [store] @@ -110,19 +110,19 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -`config.toml` का पूरा दस्तावेज़ [ग्राफ़ नोड में पाया जा सकता है डॉक्स](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md)। +`config.toml` की पूरी डॉक्यूमेंटेशन [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md) में मिल सकती है। -#### एकाधिक ग्राफ नोड्स +#### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). 
+ग्राफ-नोड indexing को क्षैतिज रूप से स्केल किया जा सकता है, कई ग्राफ-नोड instances चलाकर indexing और queries को विभिन्न नोड्स पर विभाजित किया जा सकता है। यह सरलता से किया जा सकता है, जब Graph नोड को एक अलग `node_id` के साथ शुरू किया जाता है (जैसे कि Docker Compose फ़ाइल में), जिसे फिर `config.toml` फ़ाइल में [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestion) को निर्दिष्ट करने के लिए और [deployment rules](#deployment-rules) के साथ सबग्राफ को नोड्स के बीच विभाजित करने के लिए इस्तेमाल किया जा सकता है। > ध्यान दें कि एक ही डेटाबेस का उपयोग करने के लिए कई ग्राफ़ नोड्स को कॉन्फ़िगर किया जा सकता है, जिसे स्वयं शार्डिंग के माध्यम से क्षैतिज रूप से बढ़ाया जा सकता है। -#### परिनियोजन नियम +#### Deployment rules -कई ग्राफ़ नोड्स को देखते हुए, नए सबग्राफ की तैनाती का प्रबंधन करना आवश्यक है ताकि एक ही सबग्राफ को दो अलग-अलग नोड्स द्वारा अनुक्रमित नहीं किया जा सके, जिससे टकराव हो। यह परिनियोजन नियमों का उपयोग करके किया जा सकता है, जो यह भी निर्दिष्ट कर सकता है कि यदि डेटाबेस शार्डिंग का उपयोग किया जा रहा है, तो `shard` को सबग्राफ के डेटा में संग्रहीत किया जाना चाहिए। डिप्लॉयमेंट नियम सबग्राफ नाम और उस नेटवर्क से मेल खा सकते हैं जिसे निर्णय लेने के लिए डिप्लॉयमेंट इंडेक्स कर रहा है। +यहां कई Graph नोड दिए गए हैं, इसलिए नए सबग्राफ की तैनाती का प्रबंधन करना आवश्यक है ताकि एक ही subgraph को दो विभिन्न नोड द्वारा इंडेक्स न किया जाए, क्योंकि इससे टकराव हो सकता है। यह deployment नियमों का उपयोग करके किया जा सकता है, जो यह भी निर्दिष्ट कर सकते हैं कि यदि डेटाबेस sharding का उपयोग किया जा रहा है, तो subgraph का डेटा किस `shard` में स्टोर किया जाना चाहिए। Deployment नियम subgraph के नाम और उस नेटवर्क पर मिलान कर सकते हैं जिसमें तैनाती indexing हो रही है, ताकि निर्णय लिया जा सके। -उदाहरण परिनियोजन नियम कॉन्फ़िगरेशन: +Example deployment rule configuration: ```toml [deployment] @@ -150,51 +150,51 @@ indexers = [ ] ``` -परिनियोजन नियमों के बारे में [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment) अधिक पढ़ें। +डिप्लॉयमेंट नियमों के बारे में अधिक पढ़ें [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment)। -#### समर्पित क्वेरी नोड्स +#### Dedicated query nodes -कॉन्फ़िगरेशन फ़ाइल में निम्नलिखित को शामिल करके नोड्स को स्पष्ट रूप से क्वेरी नोड होने के लिए कॉन्फ़िगर किया जा सकता है: +Nodes can be configured to explicitly be query nodes by including the following in the configuration file: ```toml [general] query = "" ``` -कोई भी नोड जिसका --node-id रेगुलर एक्सप्रेशन से मेल खाता है, केवल प्रश्नों का जवाब देने के लिए सेट किया जाएगा। +Any node whose --node-id matches the regular expression will be set up to only respond to queries. -#### शार्डिंग के माध्यम से डाटाबेस स्केलिंग +#### Database scaling via sharding -अधिकांश उपयोग के मामलों के लिए, एक एकल पोस्टग्रेज डेटाबेस ग्राफ-नोड उदाहरण का समर्थन करने के लिए पर्याप्त है। जब एक ग्राफ-नोड उदाहरण एकल पोस्टग्रेज डेटाबेस से आगे निकल जाता है, तो ग्राफ-नोड के डेटा के भंडारण को कई पोस्टग्रेज डेटाबेस में विभाजित करना संभव है। सभी डेटाबेस मिलकर ग्राफ़-नोड इंस्टेंस का स्टोर बनाते हैं। प्रत्येक व्यक्तिगत डेटाबेस को शार्ड कहा जाता है। +For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. 
-कई डेटाबेस में सबग्राफ परिनियोजन को विभाजित करने के लिए शार्ड्स का उपयोग किया जा सकता है, और डेटाबेस में क्वेरी लोड को फैलाने के लिए प्रतिकृतियों का उपयोग करने के लिए भी उपयोग किया जा सकता है। इसमें उपलब्ध डेटाबेस कनेक्शनों की संख्या को कॉन्फ़िगर करना शामिल है, प्रत्येक ग्राफ-नोड को प्रत्येक डेटाबेस के लिए अपने कनेक्शन पूल में रखना चाहिए, जो तेजी से महत्वपूर्ण हो जाता है क्योंकि अधिक उप-अनुच्छेदों को अनुक्रमित किया जा रहा है। +Shard का उपयोग subgraph deployments को कई डेटाबेस में विभाजित करने के लिए किया जा सकता है, और प्रतिकृति का उपयोग करके query लोड को डेटाबेस में फैलाने के लिए भी किया जा सकता है। इसमें यह कॉन्फ़िगर करना शामिल है कि प्रत्येक डेटाबेस के लिए प्रत्येक `ग्राफ-नोड` को अपने कनेक्शन पूल में कितने उपलब्ध डेटाबेस कनेक्शन रखने चाहिए। जैसे-जैसे अधिक सबग्राफ को index किया जा रहा है, यह अधिक महत्वपूर्ण होता जा रहा है। शेयरिंग तब उपयोगी हो जाती है जब आपका मौजूदा डेटाबेस ग्राफ़ नोड द्वारा डाले गए भार के साथ नहीं रह सकता है, और जब डेटाबेस का आकार बढ़ाना संभव नहीं होता है। -> शार्क से शुरू करने से पहले, आम तौर पर जितना संभव हो उतना बड़ा डेटाबेस बनाना बेहतर होता है। एक अपवाद वह है जहां क्वेरी ट्रैफ़िक सबग्राफ के बीच असमान रूप से विभाजित होता है; उन परिस्थितियों में यह नाटकीय रूप से मदद कर सकता है यदि उच्च मात्रा वाले सबग्राफ को एक शार्ड में रखा जाता है और बाकी सब कुछ दूसरे में रखा जाता है क्योंकि यह सेटअप अधिक संभावना बनाता है कि उच्च मात्रा वाले सबग्राफ के लिए डेटा डीबी-आंतरिक कैश में रहता है और नहीं करता है उन डेटा से प्रतिस्थापित हो जाएं जिनकी कम मात्रा वाले सबग्राफ से उतनी आवश्यकता नहीं है। +> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. -कनेक्शन कॉन्फ़िगर करने के मामले में, postgresql.conf में max_connections से 400 (या शायद 200) पर सेट करें और store_connection_wait_time_ms और store_connection_checkout_count प्रोमेथियस मेट्रिक्स देखें। ध्यान देने योग्य प्रतीक्षा समय (5ms से ऊपर कुछ भी) एक संकेत है कि बहुत कम कनेक्शन उपलब्ध हैं; उच्च प्रतीक्षा समय डेटाबेस के बहुत व्यस्त होने (जैसे उच्च CPU लोड) के कारण भी होगा। हालाँकि यदि डेटाबेस अन्यथा स्थिर लगता है, तो उच्च प्रतीक्षा समय कनेक्शन की संख्या बढ़ाने की आवश्यकता का संकेत देता है। कॉन्फ़िगरेशन में, प्रत्येक ग्राफ़-नोड उदाहरण कितने कनेक्शन का उपयोग कर सकता है, यह एक ऊपरी सीमा है, और ग्राफ़ नोड कनेक्शन को खुला नहीं रखेगा यदि इसकी आवश्यकता नहीं है। +In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. 
-स्टोर कॉन्फ़िगरेशन के बारे में [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases) अधिक पढ़ें। +[यहाँ](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases) स्टोर कॉन्फ़िगरेशन के बारे में और पढ़ें। -#### समर्पित ब्लॉक अंतर्ग्रहण +#### Dedicated block ingestion -यदि कई नोड्स कॉन्फ़िगर किए गए हैं, तो एक नोड को निर्दिष्ट करना आवश्यक होगा जो नए ब्लॉकों के अंतर्ग्रहण के लिए जिम्मेदार है, ताकि सभी कॉन्फ़िगर किए गए इंडेक्स नोड्स चेन हेड को पोल न करें। यह `चेन्स` नामस्थान के हिस्से के रूप में किया जाता है, ब्लॉक अंतर्ग्रहण के लिए उपयोग किए जाने वाले `node_id` को निर्दिष्ट करता है: +यदि कई नोड्स कॉन्फ़िगर किए गए हैं, तो यह आवश्यक होगा कि एक नोड निर्दिष्ट किया जाए जो नए ब्लॉक्स के इनजेशन के लिए जिम्मेदार हो, ताकि सभी कॉन्फ़िगर किए गए इंडेक्स नोड्स chain हेड को बार-बार पूछताछ न करें। इसे `chains` नेमस्पेस के हिस्से के रूप में किया जाता है, जहां ब्लॉक इनजेशन के लिए उपयोग किए जाने वाले `node_id` को निर्दिष्ट किया जाता है: ```toml [chains] ingestor = "block_ingestor_node" ``` -#### कई नेटवर्क का समर्थन करना +#### Supporting multiple networks -ग्राफ़ प्रोटोकॉल इंडेक्सिंग पुरस्कारों के लिए समर्थित नेटवर्क की संख्या बढ़ा रहा है, और ऐसे कई सबग्राफ मौजूद हैं जो असमर्थित नेटवर्क को इंडेक्स करते हैं जिन्हें एक इंडेक्सर प्रोसेस करना चाहता है। `config.toml` फ़ाइल अभिव्यक्तिपूर्ण और लचीले कॉन्फ़िगरेशन की अनुमति देती है: +The Graph Protocol उन नेटवर्क्स की संख्या बढ़ा रहा है जो indexing रिवार्ड्स के लिए सपोर्टेड हैं, और ऐसे कई सबग्राफ हैं जो अनसपोर्टेड नेटवर्क्स को indexing कर रहे हैं जिन्हें एक indexer प्रोसेस करना चाहेगा। `config.toml` फ़ाइल अभिव्यक्त और लचीली कॉन्फ़िगरेशन की अनुमति देती है: -- एकाधिक नेटवर्क -- प्रति नेटवर्क एकाधिक प्रदाता (यह प्रदाताओं में लोड को विभाजित करने की अनुमति दे सकता है, और पूर्ण नोड्स के साथ-साथ आर्काइव नोड्स के कॉन्फ़िगरेशन की अनुमति भी दे सकता है, यदि कोई वर्कलोड अनुमति देता है तो ग्राफ नोड सस्ता प्रदाताओं को प्राथमिकता देता है)। -- अतिरिक्त प्रदाता विवरण, जैसे सुविधाएँ, प्रमाणीकरण और प्रदाता का प्रकार (प्रायोगिक फ़ायरहोज़ समर्थन के लिए) +- Multiple networks +- Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). 
+- Additional provider details, such as features, authentication and the type of provider (for experimental Firehose support) -`[chains]` अनुभाग एथेरियम प्रदाताओं को नियंत्रित करता है जो ग्राफ़-नोड से कनेक्ट होते हैं, और जहां प्रत्येक श्रृंखला के लिए ब्लॉक और अन्य मेटाडेटा संग्रहीत होते हैं। निम्न उदाहरण दो श्रृंखलाओं, मेननेट और कोवन को कॉन्फ़िगर करता है, जहां मेननेट के ब्लॉक वीआईपी शार्ड में संग्रहीत होते हैं और कोवन के लिए ब्लॉक प्राथमिक शार्ड में संग्रहीत होते हैं। मेननेट श्रृंखला दो अलग-अलग प्रदाताओं का उपयोग कर सकती है, जबकि कोवन में केवल एक प्रदाता होता है। +`[chains]` अनुभाग उन Ethereum प्रदाताओं को नियंत्रित करता है जिनसे ग्राफ-नोड कनेक्ट होता है और जहाँ प्रत्येक chain के लिए ब्लॉक और अन्य मेटाडेटा संग्रहीत होते हैं। निम्नलिखित उदाहरण दो chain, mainnet और kovan को कॉन्फ़िगर करता है, जहाँ mainnet के लिए ब्लॉक vip shard में संग्रहीत होते हैं और kovan के लिए ब्लॉक primary shard में संग्रहीत होते हैं। mainnet chain दो अलग-अलग प्रदाताओं का उपयोग कर सकती है, जबकि kovan के पास केवल एक प्रदाता है। ```toml [chains] @@ -210,101 +210,101 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -प्रदाता कॉन्फ़िगरेशन के बारे में [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers) अधिक पढ़ें। +एथेरियम प्रदाताओं की कॉन्फ़िगरेशन के बारे में अधिक जानकारी [यहाँ](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers) पढ़ें। ### Environment variables -ग्राफ़ नोड पर्यावरण चर की एक श्रृंखला का समर्थन करता है जो सुविधाओं को सक्षम कर सकता है, या ग्राफ़ नोड व्यवहार को बदल सकता है। ये [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) प्रलेखित हैं। +ग्राफ-नोड कई environment variables का समर्थन करता है, जो सुविधाओं को सक्षम कर सकते हैं या ग्राफ-नोड के व्यवहार को बदल सकते हैं। इन्हें [यहाँ](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) प्रलेखित किया गया है। ### Continuous deployment जो उपयोगकर्ता उन्नत कॉन्फ़िगरेशन के साथ एक स्केल्ड इंडेक्सिंग सेटअप का संचालन कर रहे हैं, वे कुबेरनेट्स के साथ अपने ग्राफ़ नोड्स को प्रबंधित करने से लाभान्वित हो सकते हैं। -- इंडेक्सर रिपॉजिटरी में [उदाहरण Kubernetes संदर्भ](https://github.com/graphprotocol/indexer/tree/main/k8s) है -- [लॉन्चपैड](https://docs.graphops.xyz/launchpad/intro) ग्राफऑप्स द्वारा संचालित कुबेरनेट्स पर ग्राफ प्रोटोकॉल इंडेक्सर चलाने के लिए एक टूलकिट है। यह ग्राफ नोड परिनियोजन का प्रबंधन करने के लिए हेल्म चार्ट और सीएलआई का एक सेट प्रदान करता है। +- Indexer रिपॉजिटरी में एक [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) उपलब्ध है। +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) एक टूलकिट है जो Kubernetes पर Graph Protocol Indexer को चलाने के लिए GraphOps द्वारा मेंटेन किया जाता है। यह Helm चार्ट्स और एक CLI का सेट प्रदान करता है जो ग्राफ-नोड डिप्लॉयमेंट को प्रबंधित करने के लिए उपयोग किया जाता है। -### ग्राफ नोड का प्रबंधन +### Managing Graph Node -चल रहे ग्राफ़ नोड (या ग्राफ़ नोड्स!) को देखते हुए, चुनौती उन नोड्स में तैनात सबग्राफ को प्रबंधित करने की है। ग्राफ़ नोड उप-अनुच्छेदों को प्रबंधित करने में मदद करने के लिए उपकरणों की एक श्रृंखला पेश करता है। +Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. 
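One of those tools is the JSON-RPC admin endpoint on port 8020 (see the Ports table above). A minimal sketch - `subgraph_create` is a real admin method, but the host and subgraph name here are placeholders:

```sh
# Hypothetical name; registers a subgraph name with the node ahead of deployment
curl -s -H "content-type: application/json" \
  -d '{"jsonrpc": "2.0", "id": "1", "method": "subgraph_create", "params": {"name": "example/subgraph"}}' \
  http://localhost:8020/
```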
#### लॉगिंग -ग्राफ़ नोड के लॉग ग्राफ़ नोड और विशिष्ट सबग्राफ के डिबगिंग और अनुकूलन के लिए उपयोगी जानकारी प्रदान कर सकते हैं। ग्राफ़ नोड निम्न स्तरों के साथ `GRAPH_LOG` पर्यावरण चर के माध्यम से विभिन्न लॉग स्तरों का समर्थन करता है: त्रुटि, चेतावनी, सूचना, डीबग या ट्रेस। +ग्राफ-नोड के log डिबगिंग और ग्राफ-नोड और विशिष्ट सबग्राफ के ऑप्टिमाइजेशन के लिए उपयोगी जानकारी प्रदान कर सकते हैं। ग्राफ-नोड विभिन्न log स्तरों का समर्थन करता है via `GRAPH_LOG` पर्यावरण चर, जिनमें निम्नलिखित स्तर होते हैं: error, warn, info, debug या trace। -इसके अलावा `GRAPH_LOG_QUERY_TIMING` को `gql` पर सेट करना इस बारे में अधिक विवरण प्रदान करता है कि ग्राफ़क्यूएल क्वेरीज़ कैसे चल रही हैं (हालांकि यह बड़ी मात्रा में लॉग उत्पन्न करेगा)। +GraphQL queries कैसे चल रही हैं, इस बारे में अधिक विवरण प्राप्त करने के लिए `GRAPH_LOG_QUERY_TIMING` को `gql` पर सेट करना उपयोगी हो सकता है (हालांकि इससे बड़ी मात्रा में लॉग उत्पन्न होंगे)। -#### निगरानी & चेतावनी +#### निगरानी और सतर्कता ग्राफ़ नोड डिफ़ॉल्ट रूप से 8040 पोर्ट पर प्रोमेथियस एंडपॉइंट के माध्यम से मेट्रिक्स प्रदान करता है। इन मेट्रिक्स की कल्पना करने के लिए ग्राफाना का उपयोग किया जा सकता है। -इंडेक्सर रिपॉजिटरी [ग्राफाना कॉन्फ़िगरेशन का उदाहरण](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml) प्रदान करता है। +Indexer रिपॉजिटरी एक [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml) प्रदान करती है। #### Graphman -`ग्राफ़मैन` ग्राफ़ नोड के लिए एक रखरखाव टूल है, जो विभिन्न दैनिक और असाधारण कार्यों के निदान और समाधान में मदद करता है। +`graphman` एक maintenance टूल है ग्राफ-नोड के लिए, जो विभिन्न दैनिक और असाधारण कार्यों के निदान और समाधान में मदद करता है। -ग्राफ़मैन कमांड आधिकारिक कंटेनरों में शामिल है, और आप इसे चलाने के लिए अपने ग्राफ़-नोड कंटेनर में docker exec कर सकते हैं। इसके लिए `config.toml` फ़ाइल की आवश्यकता होती है। +The graphman कमांड आधिकारिक कंटेनरों में शामिल है, और आप अपने ग्राफ-नोड कंटेनर में docker exec कमांड का उपयोग करके इसे चला सकते हैं। इसके लिए एक `config.toml` फ़ाइल की आवश्यकता होती है। -`ग्राफ़मैन` कमांड का पूरा दस्तावेज़ ग्राफ़ नोड रिपॉजिटरी में उपलब्ध है। देखें \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) ग्राफ़ नोड `/docs` में +`graphman` कमांड्स का पूरा दस्तावेज़ ग्राफ नोड रिपॉजिटरी में उपलब्ध है। ग्राफ नोड `/docs` में [/docs/graphman.md](https://github.com/graphprotocol/ग्राफ-नोड/blob/master/docs/graphman.md) देखें। ### सबग्राफ के साथ काम करना #### अनुक्रमण स्थिति एपीआई -डिफ़ॉल्ट रूप से पोर्ट 8030/ग्राफ़िकल पर उपलब्ध, इंडेक्सिंग स्टेटस एपीआई विभिन्न सबग्राफ के लिए इंडेक्सिंग स्टेटस की जाँच करने, इंडेक्सिंग के प्रूफ़ की जाँच करने, सबग्राफ़ सुविधाओं का निरीक्षण करने आदि के लिए कई तरीकों को उजागर करता है। +Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. 
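For example, a minimal status query (field names follow the schema linked just below):

```graphql
# Query http://localhost:8030/graphql to check sync progress and health
{
  indexingStatuses {
    subgraph
    synced
    health
    chains {
      network
      latestBlock {
        number
      }
      chainHeadBlock {
        number
      }
    }
  }
}
```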
-पूरा स्कीमा [यहां](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) उपलब्ध है। +पूर्ण स्कीमा [यहां](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) उपलब्ध है। -#### अनुक्रमण प्रदर्शन +#### Indexing performance -अनुक्रमण प्रक्रिया के तीन अलग-अलग भाग हैं: +There are three separate parts of the indexing process: -- प्रदाता से रुचि के इवेंट लाए जा रहे हैं +- Fetching events of interest from the provider - उपयुक्त संचालकों के साथ घटनाओं को संसाधित करना (इसमें राज्य के लिए श्रृंखला को कॉल करना और स्टोर से डेटा प्राप्त करना शामिल हो सकता है) -- परिणामी डेटा को स्टोर पर लिखना +- Writing the resulting data to the store -इन चरणों को पाइपलाइन किया गया है (अर्थात इन्हें समानांतर में निष्पादित किया जा सकता है), लेकिन वे एक दूसरे पर निर्भर हैं। जहां सबग्राफ इंडेक्स के लिए धीमे होते हैं, अंतर्निहित कारण विशिष्ट सबग्राफ पर निर्भर करेगा। +These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. -अनुक्रमण धीमा होने के सामान्य कारण: +Common causes of indexing slowness: -- श्रृंखला से प्रासंगिक घटनाओं को खोजने में लगने वाला समय (विशेष रूप से कॉल हैंडलर धीमा हो सकता है, `trace_filter` पर निर्भरता को देखते हुए) -- हैंडलर्स के हिस्से के रूप में बड़ी संख्या में `eth_calls` बनाना -- निष्पादन के दौरान बड़ी मात्रा में स्टोर इंटरैक्शन -- स्टोर में सहेजने के लिए बड़ी मात्रा में डेटा -- संसाधित करने के लिए बड़ी संख्या में ईवेंट -- जलाने के लिए बड़ी संख्या में व्याकुलता -- प्रदाता स्वयं चेन हेड के पीछे पड़ रहा है -- प्रदाता से चेन हेड पर नई रसीदें प्राप्त करने में धीमापन +- Chain से प्रासंगिक आयोजन खोजने में लगने वाला समय (विशेष रूप से कॉल handler धीमे हो सकते हैं, क्योंकि ये `trace_filter` पर निर्भर करते हैं)। +- Handler के हिस्से के रूप में बड़ी संख्या में `eth_calls` करना। +- A large amount of store interaction during execution +- A large amount of data to save to the store +- A large number of events to process +- Slow database connection time, for crowded nodes +- The provider itself falling behind the chain head +- Slowness in fetching new receipts at the chain head from the provider -सबग्राफ इंडेक्सिंग मेट्रिक्स इंडेक्सिंग धीमेपन के मूल कारण का निदान करने में मदद कर सकते हैं। कुछ मामलों में, समस्या सबग्राफ में ही निहित है, लेकिन अन्य में, बेहतर नेटवर्क प्रदाता, कम डेटाबेस विवाद और अन्य कॉन्फ़िगरेशन सुधार इंडेक्सिंग प्रदर्शन में स्पष्ट रूप से सुधार कर सकते हैं। +Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. #### विफल सबग्राफ -इंडेक्सिंग सबग्राफ के दौरान विफल हो सकता है, यदि वे अप्रत्याशित डेटा का सामना करते हैं, कुछ घटक अपेक्षित रूप से काम नहीं कर रहे हैं, या यदि ईवेंट हैंडलर या कॉन्फ़िगरेशन में कुछ बग है। विफलता के दो सामान्य प्रकार हैं: +During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. 
There are two general types of failure: -- नियतात्मक विफलताएँ: ये ऐसी विफलताएँ हैं जिन्हें पुनर्प्रयास से हल नहीं किया जा सकता है +- Deterministic failures: these are failures which will not be resolved with retries - गैर-नियतात्मक विफलताएँ: ये प्रदाता के साथ समस्याओं या कुछ अप्रत्याशित ग्राफ़ नोड त्रुटि के कारण हो सकती हैं। जब एक गैर-नियतात्मक विफलता होती है, तो ग्राफ़ नोड समय के साथ पीछे हटते हुए विफल हैंडलर को फिर से प्रयास करेगा। -कुछ मामलों में इंडेक्सर द्वारा विफलता को हल किया जा सकता है (उदाहरण के लिए यदि त्रुटि सही प्रकार का प्रदाता नहीं होने का परिणाम है, तो आवश्यक प्रदाता जोड़ने से अनुक्रमण जारी रहेगा)। हालाँकि अन्य में, सबग्राफ कोड में बदलाव की आवश्यकता होती है। +In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. -> नियतात्मक विफलताओं को "अंतिम" माना जाता है, असफल ब्लॉक के लिए उत्पन्न अनुक्रमण के प्रमाण के साथ, जबकि गैर-निर्धारक विफलताओं को नहीं माना जाता है, क्योंकि सबग्राफ "असफल" और अनुक्रमण जारी रखने का प्रबंधन कर सकता है। कुछ मामलों में, गैर-नियतात्मक लेबल गलत है, और सबग्राफ कभी भी त्रुटि को दूर नहीं करेगा; ऐसी विफलताओं को ग्राफ़ नोड रिपॉजिटरी पर मुद्दों के रूप में रिपोर्ट किया जाना चाहिए। +> निश्चितात्मक विफलताएँ "अंतिम" मानी जाती हैं, जिनके लिए विफल ब्लॉक के लिए एक Proof of Indexing उत्पन्न किया जाता है, जबकि अनिर्णायक विफलताएँ नहीं होतीं, क्योंकि Subgraph "अविफल" हो सकता है और indexing जारी रख सकता है। कुछ मामलों में, अनिर्णायक लेबल गलत होता है, और Subgraph कभी भी त्रुटि को पार नहीं कर पाएगा; ऐसी विफलताओं को ग्राफ नोड रिपॉजिटरी पर मुद्दों के रूप में रिपोर्ट किया जाना चाहिए। #### कैश को ब्लॉक और कॉल करें -प्रदाता से रीफ़ेचिंग को बचाने के लिए ग्राफ़ नोड स्टोर में कुछ डेटा को कैश करता है। ब्लॉक को कैश किया जाता है, जैसा कि `eth_calls` के परिणाम होते हैं (बाद वाले को एक विशिष्ट ब्लॉक के रूप में कैश किया जाता है)। यह कैशिंग थोड़े बदले हुए सबग्राफ के "रीसिंकिंग" के दौरान अनुक्रमण गति को नाटकीय रूप से बढ़ा सकती है। +ग्राफ-नोड कुछ डेटा को स्टोर में कैश करता है ताकि प्रोवाइडर से फिर से प्राप्त करने की आवश्यकता न हो। ब्लॉक्स को कैश किया जाता है, साथ ही `eth_calls` के परिणाम (जो कि एक विशिष्ट ब्लॉक से कैश किए जाते हैं)। यह कैशिंग "थोड़े बदले हुए subgraph" के दौरान indexing की गति को नाटकीय रूप से बढ़ा सकती है। -हालाँकि, कुछ उदाहरणों में, यदि एथेरियम नोड ने कुछ अवधि के लिए गलत डेटा प्रदान किया है, तो यह कैश में अपना रास्ता बना सकता है, जिससे गलत डेटा या विफल सबग्राफ हो सकते हैं। इस मामले में इंडेक्सर जहरीली कैश को साफ करने के लिए `ग्राफमैन` का उपयोग कर सकते हैं, और फिर प्रभावित सबग्राफ को रिवाइंड कर सकते हैं, जो तब (उम्मीद है) स्वस्थ प्रदाता से ताजा डेटा प्राप्त करेगा। +यदि कभी Ethereum नोड ने किसी समय अवधि के लिए गलत डेटा प्रदान किया है, तो वह कैश में जा सकता है, जिसके परिणामस्वरूप गलत डेटा या विफल सबग्राफ हो सकते हैं। इस स्थिति में, Indexer `graphman` का उपयोग करके ज़हरीले कैश को हटा सकते हैं, और फिर प्रभावित सबग्राफ को रीवाइंड कर सकते हैं, जो फिर (आशा है) स्वस्थ प्रदाता से ताज़ा डेटा प्राप्त करेंगे। -यदि एक ब्लॉक कैश असंगतता का संदेह है, जैसे कि tx रसीद गुम घटना: +If a block cache inconsistency is suspected, such as a tx receipt missing event: -1. `ग्राफमैन श्रृंखला सूची` श्रृंखला का नाम खोजने के लिए। -2. `ग्राफमैन चेन चेक-ब्लॉक बाई-नंबर ` यह जांच करेगा कि क्या कैश्ड ब्लॉक प्रदाता से मेल खाता है, और यदि ऐसा नहीं होता है तो ब्लॉक को कैश से हटा देता है। - 1. यदि कोई अंतर है, तो `ग्राफमैन चेन ट्रंकेट ` के साथ पूरे कैश को छोटा करना अधिक सुरक्षित हो सकता है। +1. 
`graphman chain list` का उपयोग करके chain का नाम पता करें।
2. `graphman chain check-blocks <chain> by-number <number>` यह जांच करेगा कि क्या कैश किया हुआ ब्लॉक प्रदाता से मेल खाता है, और यदि यह मेल नहीं खाता है तो ब्लॉक को कैश से हटा देगा।
   1. यदि कोई अंतर है, तो पूरे कैश को `graphman chain truncate <chain>` के साथ हटाना अधिक सुरक्षित हो सकता है।
   2. यदि ब्लॉक प्रदाता से मेल खाता है, तो समस्या को सीधे प्रदाता के विरुद्ध डिबग किया जा सकता है।

#### Querying issues and errors

Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process.

हालाँकि, एक समर्पित क्वेरी नोड और प्रतिकृतियों के साथ भी, कुछ प्रश्नों को निष्पादित करने में लंबा समय लग सकता है, और कुछ मामलों में मेमोरी उपयोग में वृद्धि होती है और अन्य उपयोगकर्ताओं के लिए क्वेरी समय को नकारात्मक रूप से प्रभावित करती है।

There is not one "silver bullet", but a range of tools for preventing, diagnosing and dealing with slow queries.

##### क्वेरी कैशिंग

ग्राफ-नोड डिफ़ॉल्ट रूप से GraphQL queries को कैश करता है, जिससे डेटाबेस लोड को काफी हद तक कम किया जा सकता है। इसे `GRAPH_QUERY_CACHE_BLOCKS` और `GRAPH_QUERY_CACHE_MAX_MEM` सेटिंग्स के साथ और अधिक कॉन्फ़िगर किया जा सकता है - अधिक पढ़ें [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching)।

##### Analysing queries

Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. 
अन्य मामलों में, क्वेरी नोड पर ट्रिगर उच्च मेमोरी उपयोग हो सकता है, इस मामले में सबसे पहले समस्या उत्पन्न करने वाली क्वेरी की पहचान करना चुनौती है। -इंडेक्सर [qlog](https://github.com/graphprotocol/qlog/) का इस्तेमाल ग्राफ़ नोड के क्वेरी लॉग को प्रोसेस करने और सारांशित करने के लिए कर सकते हैं। `GRAPH_LOG_QUERY_TIMING` को धीमी क्वेरी को पहचानने और डीबग करने में सहायता के लिए भी सक्षम किया जा सकता है। +Indexers [qlog](https://github.com/graphprotocol/qlog/) का उपयोग करके ग्राफ-नोड के query logs को प्रोसेस और सारांशित कर सकते हैं। धीमे queries की पहचान और डिबग करने में मदद के लिए `GRAPH_LOG_QUERY_TIMING` को भी सक्षम किया जा सकता है। -धीमी क्वेरी को देखते हुए, इंडेक्सर्स के पास कुछ विकल्प होते हैं। निस्संदेह वे अपने लागत मॉडल को बदल सकते हैं, समस्याग्रस्त क्वेरी भेजने की लागत में काफी वृद्धि कर सकते हैं। इसके परिणामस्वरूप उस क्वेरी की आवृत्ति में कमी हो सकती है। हालाँकि यह अक्सर समस्या के मूल कारण को हल नहीं करता है। +Given a slow query, indexers have a few options. Of course they can alter their cost model, to significantly increase the cost of sending the problematic query. This may result in a reduction in the frequency of that query. However this often doesn't resolve the root cause of the issue. -##### खाता जैसा अनुकूलन +##### Account-like optimisation -डेटाबेस तालिकाएँ जो संस्थाओं को स्टोर करती हैं, आम तौर पर दो किस्मों में आती हैं: 'लेन-देन-जैसी', जहाँ संस्थाएँ, एक बार बनने के बाद, कभी भी अपडेट नहीं होती हैं, यानी, वे वित्तीय लेनदेन की सूची के समान कुछ स्टोर करती हैं, और 'खाता-जैसा' जहाँ संस्थाएँ बहुत बार अपडेट किए जाते हैं, यानी, वे वित्तीय खातों की तरह कुछ स्टोर करते हैं जो हर बार लेनदेन रिकॉर्ड होने पर संशोधित हो जाते हैं। खाता-जैसी तालिकाओं की विशेषता इस तथ्य से होती है कि उनमें बड़ी संख्या में इकाई संस्करण होते हैं, लेकिन अपेक्षाकृत कुछ विशिष्ट इकाइयाँ होती हैं। अक्सर, ऐसी तालिकाओं में अलग-अलग संस्थाओं की संख्या पंक्तियों की कुल संख्या (इकाई संस्करण) का 1% होती है +Database tables that store entities seem to generally come in two varieties: 'transaction-like', where entities, once created, are never updated, i.e., they store something akin to a list of financial transactions, and 'account-like' where entities are updated very often, i.e., they store something like financial accounts that get modified every time a transaction is recorded. Account-like tables are characterized by the fact that they contain a large number of entity versions, but relatively few distinct entities. 
Often, in such tables the number of distinct entities is 1% of the total number of rows (entity versions).
-खाता-जैसी तालिकाओं के लिए, `ग्राफ़-नोड` प्रश्नों को उत्पन्न कर सकता है जो इस बात का विवरण देता है कि कैसे पोस्टग्रेज डेटा को इतनी उच्च दर के परिवर्तन के साथ संग्रहीत करता है, अर्थात् हाल के ब्लॉक के सभी संस्करण अंदर हैं ऐसी तालिका के लिए समग्र संग्रहण का एक छोटा उपखंड।
+अकाउंट-जैसी तालिकाओं के लिए, `ग्राफ-नोड` ऐसे queries जनरेट कर सकता है जो इस विवरण का लाभ उठाते हैं कि Postgres इतनी तेज़ दर पर डेटा स्टोर करते समय इसे कैसे प्रबंधित करता है। खासतौर पर, हाल के ब्लॉक्स के सभी संस्करण ऐसी तालिका के कुल स्टोरेज के एक छोटे से हिस्से में होते हैं।
-आदेश `ग्राफ़मैन आँकड़े दिखाता है`, परिनियोजन में प्रत्येक इकाई प्रकार/तालिका के लिए, कितने अलग निकाय हैं, और प्रत्येक तालिका में कितने इकाई संस्करण हैं। वह डेटा पोस्टग्रेज-आंतरिक अनुमानों पर आधारित है, और इसलिए अनिवार्य रूप से सटीक है, और परिमाण के क्रम से बंद हो सकता है। `-1` `entities` कॉलम में इसका मतलब है कि Postgres का मानना है कि सभी पंक्तियों में एक अलग इकाई होती है।
+कमांड `graphman stats show <nnnn>` प्रत्येक डिप्लॉयमेंट में मौजूद entities प्रकार/टेबल के लिए यह दिखाता है कि प्रत्येक टेबल में कितनी अलग-अलग entities और कितने entities वर्ज़न हैं। यह डेटा Postgres के आंतरिक अनुमानों पर आधारित होता है, और इसलिए यह अनिवार्य रूप से सटीक नहीं होता है और इसमें एक ऑर्डर ऑफ मैग्निट्यूड तक का अंतर हो सकता है। `entities` कॉलम में `-1` का मतलब है कि Postgres मानता है कि सभी पंक्तियां एक अलग entities को शामिल करती हैं।
-सामान्य तौर पर, तालिकाएँ जहाँ अलग-अलग संस्थाओं की संख्या पंक्तियों / इकाई संस्करणों की कुल संख्या के 1% से कम होती है, वे खाता-जैसे अनुकूलन के लिए अच्छे उम्मीदवार होते हैं। जब `ग्राफ़मैन आँकड़े दिखाते हैं` का आउटपुट इंगित करता है कि एक तालिका इस अनुकूलन से लाभान्वित हो सकती है, चल रहे `ग्राफ़मैन आँकड़े <टेबल>` तालिका की पूरी गणना करेगा - जो धीमा हो सकता है, लेकिन अलग-अलग इकाइयों के समग्र इकाई संस्करणों के अनुपात का एक सटीक माप देता है।
+सामान्यतः, वे तालिकाएँ जहाँ विशिष्ट entities की संख्या कुल पंक्तियों/entities संस्करणों की संख्या का 1% से कम हो, वे खाता-जैसा अनुकूलन के लिए अच्छे उम्मीदवार होती हैं। जब `graphman stats show` का आउटपुट यह दर्शाता है कि कोई तालिका इस optimization से लाभ उठा सकती है, तो `graphman stats show <nnnn> <table>` चलाने पर तालिका की पूरी गणना की जाती है - यह धीमा हो सकता है, लेकिन विशिष्ट entities और कुल entities संस्करणों के अनुपात का सटीक माप प्रदान करता है।
-एक बार तालिका को खाता-समान निर्धारित कर लेने के बाद, `ग्राफ़मैन आँकड़े खाता-जैसा <nnnn>.<टेबल>` चलाने से उस तालिका के विरुद्ध प्रश्नों के लिए खाता-जैसा अनुकूलन चालू हो जाएगा। ऑप्टिमाइज़ेशन को फिर से बंद किया जा सकता है `ग्राफ़मैन स्टैटिस्टिक्स अकाउंट-लाइक --clear <nnnn>.<टेबल>` से। क्वेरी नोड्स को यह नोटिस करने में 5 मिनट तक का समय लगता है कि ऑप्टिमाइज़ेशन चालू कर दिया गया है या बंद। अनुकूलन चालू करने के बाद, यह सत्यापित करना आवश्यक है कि परिवर्तन वास्तव में उस तालिका के लिए प्रश्नों को धीमा नहीं करता है। यदि आपने Postgres की निगरानी के लिए Grafana को कॉन्फ़िगर किया है, तो धीमी क्वेरी बड़ी संख्या में `pg_stat_activity` में दिखाई देगी, जिसमें कई सेकंड लगेंगे। उस स्थिति में, अनुकूलन को फिर से बंद करने की आवश्यकता होती है।
+एक बार जब यह तय कर लिया जाता है कि एक तालिका खाता जैसी है, तो `graphman stats account-like <nnnn>.<table>` चलाने से उस तालिका के खिलाफ queries के लिए खाता जैसी अनुकूलन सक्षम हो जाएगा। इस अनुकूलन को फिर से बंद किया जा सकता है `graphman stats account-like --clear <nnnn>.<table>` के साथ। queries नोड्स को यह नोटिस करने में 5 मिनट तक का समय लग सकता है कि अनुकूलन को चालू या बंद किया गया है। अनुकूलन को चालू करने के बाद, यह सत्यापित करना आवश्यक है कि बदलाव वास्तव में उस तालिका के लिए queries को धीमा नहीं कर रहा है। यदि आपने Grafana को Postgres की निगरानी के लिए कॉन्फ़िगर किया है, तो धीमी queries `pg_stat_activity` में बड़ी संख्या में दिखाई देंगी, जो कई सेकंड ले रही हैं। ऐसे में, अनुकूलन को फिर से बंद करने की आवश्यकता होती है।
-Uniswap- जैसे सबग्राफ के लिए `pair` और `token` टेबल इस ऑप्टिमाइज़ेशन के लिए प्रमुख उम्मीदवार हैं, और डेटाबेस लोड पर नाटकीय प्रभाव डाल सकते हैं।
+Uniswap-जैसे सबग्राफ़ के लिए, `pair` और `token` तालिकाएँ इस अनुकूलन के प्रमुख उम्मीदवार हैं, और ये डेटाबेस लोड पर नाटकीय प्रभाव डाल सकते हैं।
#### सबग्राफ हटाना
-> यह नई कार्यक्षमता है, जो ग्राफ नोड 0.29.x में उपलब्ध होगी
+> This is new functionality, which will be available in Graph Node 0.29.x
-किसी बिंदु पर एक अनुक्रमणिका किसी दिए गए सबग्राफ को हटाना चाह सकती है। यह `ग्राफमैन ड्रॉप` के माध्यम से आसानी से किया जा सकता है, जो एक परिनियोजन और उसके सभी अनुक्रमित डेटा को हटा देता है। परिनियोजन को या तो सबग्राफ नाम, IPFS हैश `Qm..`, या डेटाबेस नाम स्थान `sgdNNN` के रूप में निर्दिष्ट किया जा सकता है। आगे के दस्तावेज़ [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) उपलब्ध हैं।
+किसी बिंदु पर एक indexer एक दिए गए subgraph को हटाना चाह सकता है। इसे आसानी से `graphman drop` के माध्यम से किया जा सकता है, जो एक deployment और उसके सभी indexed डेटा को हटा देता है। डिप्लॉयमेंट को subgraph नाम, एक IPFS हैश `Qm..`, या डेटाबेस नामस्थान `sgdNNN` के रूप में निर्दिष्ट किया जा सकता है। आगे का दस्तावेज़ीकरण [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) पर उपलब्ध है।
From 6093cb8b236e2a245bcdf3dbdca8b21258558546 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:45:45 -0500
Subject: [PATCH 0132/1534] New translations advanced.mdx (Romanian)
---
 .../developing/creating/advanced.mdx          | 30 ++++++++++++-------
 1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/website/src/pages/ro/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ro/subgraphs/developing/creating/advanced.mdx
index bc3a9a86efb6..ee9918f5f254 100644
--- a/website/src/pages/ro/subgraphs/developing/creating/advanced.mdx
+++ b/website/src/pages/ro/subgraphs/developing/creating/advanced.mdx
@@ -1,5 +1,5 @@
 ---
-title: Advance Subgraph Features
+title: Advanced Subgraph Features
 ---
 ## Overview
@@ -29,9 +29,13 @@ dataSources: ...
 ## Timeseries and Aggregations
-Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc.
+Prerequisites:
-This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL.
+- Subgraph specVersion must be ≥1.1.0.
+
+Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more.
+
+This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL.
### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Non-fatal errors @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). From 3908f5577df6b2b0027bcd0c5feb661abad19337 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:46 -0500 Subject: [PATCH 0133/1534] New translations advanced.mdx (French) --- .../developing/creating/advanced.mdx | 182 +++++++++--------- 1 file changed, 95 insertions(+), 87 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/advanced.mdx b/website/src/pages/fr/subgraphs/developing/creating/advanced.mdx index 68c04eec3ad8..0cbda1512748 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/advanced.mdx @@ -1,20 +1,20 @@ --- -title: Advance Subgraph Features +title: Fonctionnalités avancées des subgraphs --- ## Aperçu -Add and implement advanced subgraph features to enhanced your subgraph's built. +Ajoutez et implémentez des fonctionnalités avancées de subgraph pour améliorer la construction de votre subgraph. -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +À partir de `specVersion` `0.0.4`, les fonctionnalités de subgraph doivent être explicitement déclarées dans la section `features` au niveau supérieur du fichier de manifeste, en utilisant leur nom en `camelCase` comme indiqué dans le tableau ci-dessous : -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | +| Fonctionnalité | Nom | +| ----------------------------------------------------------- | ---------------- | +| [Erreurs non fatales](#non-fatal-errors) | `nonFatalErrors` | +| [Recherche plein texte](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Greffage](#grafting-onto-existing-subgraphs) | `grafting` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +Par exemple, si un subgraph utilise les fonctionnalités **Full-Text Search** et **Non-fatal Errors**, le champ `features` dans le manifeste devrait être : ```yaml specVersion: 0.0.4 @@ -25,13 +25,17 @@ features: dataSources: ... ``` -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +> Notez que L'utilisation d'une fonctionnalité sans la déclarer entraînera une **validation error** lors du déploiement du subgraph, mais aucune erreur ne se produira si une fonctionnalité est déclarée mais non utilisée. ## Séries chronologiques et agrégations -Les séries chronologiques et les agrégations permettent à votre subgraph de suivre des statistiques telles que le prix moyen journalier, le total des transferts par heure, etc. +Prerequisites: -Cette fonctionnalité introduit deux nouveaux types d'entités de subgraph. Les entités de séries chronologiques enregistrent des points de données avec des horodatages. 
Les entités d'agrégation effectuent des calculs pré-déclarés sur les points de données des séries chronologiques sur une base horaire ou quotidienne, puis stockent les résultats pour un accès facile via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Exemple de schéma @@ -49,25 +53,33 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Définition des Séries Chronologiques et des Agrégations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Intervalles d'Agrégation disponibles -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. +- `hour`: définit la période de séries chronologiques toutes les heures, à l'heure pile. +- `day`: définit la période de séries chronologiques chaque jour, commençant et se terminant à 00:00. #### Fonctions d'Agrégation disponibles -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. +- `sum`: Total de toutes les valeurs. +- `count`: Nombre de valeurs. +- `min`: Valeur minimum. +- `max`: Valeur maximum. +- `first`: Première valeur de la période. +- `last` : Dernière valeur de la période. #### Exemple de requête d'Agrégations @@ -81,17 +93,13 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. 
Every ag } ``` -Remarque: - -Pour utiliser les Séries Chronologiques et les Agrégations, un subgraph doit avoir une spec Version ≥1.1.0. Notez que cette fonctionnalité pourrait subir des changements significatifs affectant la compatibilité rétroactive. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. +[En savoir plus](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) sur les séries chronologiques et Agrégations. ## Erreurs non fatales Les erreurs d'indexation sur les subgraphs déjà synchronisés entraîneront, par défaut, l'échec du subgraph et l'arrêt de la synchronisation. Les subgraphs peuvent également être configurés pour continuer la synchronisation en présence d'erreurs, en ignorant les modifications apportées par le gestionnaire qui a provoqué l'erreur. Cela donne aux auteurs de subgraphs le temps de corriger leurs subgraphs pendant que les requêtes continuent d'être traitées sur le dernier bloc, bien que les résultats puissent être incohérents en raison du bogue à l'origine de l'erreur. Notez que certaines erreurs sont toujours fatales. Pour être non fatale, l'erreur doit être connue pour être déterministe. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Note:** The Graph Network ne supporte pas encore les erreurs non fatales, et les développeurs ne doivent pas déployer de subgraphs utilisant cette fonctionnalité sur le réseau via le Studio. L'activation des erreurs non fatales nécessite la définition de l'indicateur de fonctionnalité suivant sur le manifeste du subgraph : @@ -103,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +La requête doit également opter pour l'interrogation de données avec des incohérences potentielles via l'argument `subgraphError`. Il est également recommandé d'interroger `_meta` pour vérifier si le subgraph a ignoré des erreurs, comme dans l'exemple : ```graphql foos(first: 100, subgraphError: allow) { @@ -115,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +Si le subgraph rencontre une erreur, cette requête renverra à la fois les données et une erreur graphql avec le message `"indexing_error"`, comme dans cet exemple de réponse : ```graphql "data": { @@ -145,15 +153,15 @@ Les sources de données de fichiers sont une nouvelle fonctionnalité de subgrap Plutôt que de récupérer les fichiers "ligne par ligne" pendant l'exécution du gestionnaire, ceci introduit des modèles qui peuvent être générés comme nouvelles sources de données pour un identifiant de fichier donné. Ces nouvelles sources de données récupèrent les fichiers, réessayant en cas d'échec, et exécutant un gestionnaire dédié lorsque le fichier est trouvé. -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. 
+Cela est similaire aux [modèles de source de données existants](/developing/creating-a-subgraph/#data-source-templates), qui sont utilisés pour créer dynamiquement de nouvelles sources de données basées sur la blockchain. -> This replaces the existing `ipfs.cat` API +> Cela remplace l'API `ipfs.cat` existante ### Guide de mise à niveau -#### Update `graph-ts` and `graph-cli` +#### Mise à jour de `graph-ts` et `graph-cli` -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 +Les fichiers sources de données requièrent graph-ts >=0.29.0 et graph-cli >=0.33.1 #### Ajouter un nouveau type d'entité qui sera mis à jour lorsque des fichiers seront trouvés @@ -202,9 +210,9 @@ type TokenMetadata @entity { Si la relation est 1:1 entre l'entité parent et l'entité de source de données de fichier résultante, le modèle le plus simple consiste à lier l'entité parent à une entité de fichier résultante en utilisant le CID IPFS comme recherche. Contactez Discord si vous rencontrez des difficultés pour modéliser vos nouvelles entités basées sur des fichiers ! -> You can use [nested filters](/subgraphs/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. +> Vous pouvez utiliser [les filtres imbriqués](/subgraphs/querying/graphql-api/#example-for-nested-entity-filtering) pour filtrer les entités parents sur la base de ces entités imbriquées. -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` +#### Ajouter une nouvelle source de données modélisée avec `kind : file/ipfs` ou `kind : file/arweave` Il s'agit de la source de données qui sera générée lorsqu'un fichier d'intérêt est identifié. @@ -224,15 +232,15 @@ templates: file: ./abis/Token.json ``` -> Currently `abis` are required, though it is not possible to call contracts from within file data sources +> Actuellement, les `abis` sont nécessaires, bien qu'il ne soit pas possible d'appeler des contrats à partir de fichiers sources de données -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. +Le fichier source de données doit mentionner spécifiquement tous les types d'entités avec lesquels elle interagira sous `entities`. Voir [limitations](#limitations) pour plus de détails. #### Créer un nouveau gestionnaire pour traiter les fichiers -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/subgraphs/developing/creating/graph-ts/api/#json-api)). +Ce gestionnaire devrait accepter un paramètre `Bytes`, qui sera le contenu du fichier, lorsqu'il sera trouvé, il pourra alors être traité. Il s'agira souvent d'un fichier JSON, qui peut être traité à l'aide des utilitaires de `graph-ts` ([documentation](/subgraphs/developing/creating/graph-ts/api/#json-api)). 
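Since these handlers usually parse JSON, a defensive variant can be useful. The following is a minimal sketch, not part of the original guide (the entity-population step is elided), using the fallible `graph-ts` JSON helpers so that a malformed file is skipped rather than failing the handler:

```typescript
import { json, JSONValueKind, Bytes } from '@graphprotocol/graph-ts'

export function handleMetadata(content: Bytes): void {
  // try_fromBytes returns a Result, so a malformed file can be skipped
  // instead of aborting the file data source handler
  const result = json.try_fromBytes(content)
  if (result.isError || result.value.kind != JSONValueKind.OBJECT) {
    return
  }
  const value = result.value.toObject()
  // ... read fields from `value` and save entities here
}
```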
-The CID of the file as a readable string can be accessed via the `dataSource` as follows: +Le CID du fichier sous forme de chaîne de caractères lisible est accessible via `dataSource` de la manière suivante : ```typescript const cid = dataSource.stringParam() @@ -269,12 +277,12 @@ export function handleMetadata(content: Bytes): void { Vous pouvez désormais créer des sources de données de fichiers lors de l'exécution de gestionnaires basés sur une chaîne : -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave +- Importer le modèle à partir du fichier `templates` généré automatiquement +- appeler `TemplateName.create(cid : string)` à partir d'un mappage, où le cid est un identifiant de contenu valide pour IPFS ou Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +Pour IPFS, Graph Node prend en charge [les identifiants de contenu v0 et v1](https://docs.ipfs.tech/concepts/content-addressing/), et les identifiants de contenu avec des répertoires (par exemple, `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). +Pour Arweave, à partir de la version 0.33.0, Graph Node peut récupérer des fichiers stockés sur Arweave sur la base de leur [identifiant de transaction](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) à partir d'une passerelle Arweave ([exemple de fichier](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave prend en charge les transactions téléchargées via Irys (anciennement Bundlr), et Graph Node peut également récupérer des fichiers sur la base les [manifestes Irys](https://docs.irys.xyz/overview/gateways#indexing). L'exemple: @@ -284,7 +292,7 @@ import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' //Cet exemple de code concerne un sous-graphe de Crypto coven. Le hachage ipfs ci-dessus est un répertoire contenant les métadonnées des jetons pour toutes les NFT de l'alliance cryptographique. -export function handleTransfer(event: TransferEvent): void { +export function handleTransfer(event : TransferEvent) : void { let token = Token.load(event.params.tokenId.toString()) if (!token) { token = new Token(event.params.tokenId.toString()) @@ -307,15 +315,15 @@ export function handleTransfer(event: TransferEvent): void { Cela créera une nouvelle source de données de fichier, qui interrogera le point d'extrémité IPFS ou Arweave configuré du nœud de graphique, en réessayant si elle n'est pas trouvée. Lorsque le fichier est trouvé, le gestionnaire de la source de données de fichier est exécuté. 
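Once the file has been processed and its entities saved, the link can be followed in a single query. A rough sketch against the `Token`/`TokenMetadata` schema shown earlier (the field selection is illustrative):

```graphql
{
  tokens(first: 5) {
    id
    tokenURI
    ipfsURI {
      name
      description
      image
    }
  }
}
```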
-This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. +Cet exemple utilise le CID comme référence entre l'entité parent `Token` et l'entité résultante `TokenMetadata`. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Auparavant, c'est à ce stade qu'un développeur de subgraphs aurait appelé `ipfs.cat(CID)` pour récupérer le fichier Félicitations, vous utilisez des sources de données de fichiers ! #### Déployer vos subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +Vous pouvez maintenant `construire` et `déployer` votre subgraph sur n'importe quel Graph Node >=v0.30.0-rc.0. #### Limitations @@ -333,29 +341,29 @@ En outre, il n'est pas possible de créer des sources de données à partir d'un Si vous liez des métadonnées NFT aux jetons correspondants, utilisez le hachage IPFS des métadonnées pour référencer une entité Metadata à partir de l'entité Token. Enregistrez l'entité Metadata en utilisant le hachage IPFS comme identifiant. -You can use [DataSource context](/subgraphs/developing/creating/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. +Vous pouvez utiliser [Le contexte de DataSource](/subgraphs/developing/creating/graph-ts/api/#entity-and-datasourcecontext) lors de la création de fichiers sources de données(File Data Sources) pour transmettre des informations supplémentaires qui seront disponibles pour le gestionnaire de la File Data Source. -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. +Si vous avez des entités qui sont actualisées plusieurs fois, créez des entités uniques basées sur des fichiers en utilisant le hash IPFS & l'ID de l'entité, puis référencez-les en utilisant un champ dérivé dans l'entité basée sur la chaîne. > Nous travaillons à l'amélioration de la recommandation ci-dessus, afin que les requêtes ne renvoient que la version "la plus récente" #### Problèmes connus -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. +Les fichiers sources de données nécessitent actuellement des ABI, même si les ABI ne sont pas utilisées ([problème](https://github.com/graphprotocol/graph-cli/issues/961)). La solution consiste à ajouter n'importe quel ABI. -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. +Les gestionnaires pour les fichiers sources de données ne peuvent pas être dans des fichiers qui importent des liaisons de contrat `eth_call`, échouant avec "unknown import : `ethereum::ethereum.call` n'a pas été défini" ([problème](https://github.com/graphprotocol/graph-node/issues/4309)). La solution consiste à créer des gestionnaires de fichiers de sources de données dans un fichier dédié. 
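As a sketch of that workaround (the file name is invented, and the field assignment is for illustration only), the file data source handler lives in its own module that imports nothing generated from contract ABIs:

```typescript
// src/file-metadata.ts: deliberately free of any eth_call contract bindings
import { Bytes, dataSource } from '@graphprotocol/graph-ts'
import { TokenMetadata } from '../generated/schema'

export function handleMetadata(content: Bytes): void {
  // dataSource.stringParam() is the CID the template was created with
  let tokenMetadata = new TokenMetadata(dataSource.stringParam())
  tokenMetadata.description = content.toString() // illustration only
  tokenMetadata.save()
}
```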
#### Exemples -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) +[migration de subgraph Crypto Coven ](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) #### Les Références -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) +[fichier sources de données GIP](https://forum.thegraph.com/t/gip-file-data-sources/2721) ## Filtres d'Arguments indexés / Filtres de Topics -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` +> **Nécessite** : [SpecVersion](#specversion-releases) >= `1.2.0` Les filtres de topics, également connus sous le nom de filtres d'arguments indexés, sont une fonctionnalité puissante dans les subgraphs qui permettent aux utilisateurs de filtrer précisément les événements de la blockchain en fonction des valeurs de leurs arguments indexés. @@ -367,7 +375,7 @@ Les filtres de topics, également connus sous le nom de filtres d'arguments inde Lorsqu'un contrat intelligent émet un événement, tous les arguments marqués comme indexés peuvent être utilisés comme filtres dans le manifeste d'un subgraph. Ceci permet au subgraph d'écouter de façon sélective les événements qui correspondent à ces arguments indexés. -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. +- Le premier argument indexé de l'événement correspond à `topic1`, le second à `topic2`, et ainsi de suite, jusqu'à `topic3`, puisque la machine virtuelle Ethereum (EVM) autorise jusqu'à trois arguments indexés par événement. ```solidity // SPDX-License-Identifier: MIT @@ -387,9 +395,9 @@ contract Token { Dans cet exemple: -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. +- L'événement `Transfer` est utilisé pour enregistrer les transactions de jetons entre adresses. +- Les paramètres `from` et `to` sont indexés, ce qui permet aux auditeurs d'événements de filtrer et de surveiller les transferts impliquant des adresses spécifiques. +- La fonction `transfer` est une représentation simple d'une action de transfert de jeton, émettant l'événement Transfer à chaque fois qu'elle est appelée. #### Configuration dans les subgraphs @@ -406,7 +414,7 @@ eventHandlers: Dans cette configuration : -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. +- `topic1` correspond au premier argument indexé de l'événement, `topic2` au deuxième et `topic3` au troisième. - Chaque topic peut avoir une ou plusieurs valeurs, et un événement n'est traité que s'il correspond à l'une des valeurs de chaque rubrique spécifiée. #### Logique des Filtres @@ -426,9 +434,9 @@ eventHandlers: Dans cette configuration: -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. +- `topic1` est configuré pour filtrer les événements `Transfer` dont l'expéditeur est `0xAddressA`. 
+- `topic2` est configuré pour filtrer les événements `Transfer` dont `0xAddressB` est le destinataire. +- Le subgraph n'indexera que les transactions qui se produisent directement de `0xAddressA` à `0xAddressB`. #### Exemple 2 : Suivi des transactions dans les deux sens entre deux ou plusieurs adresses @@ -442,15 +450,15 @@ eventHandlers: Dans cette configuration: -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. +- `topic1` est configuré pour filtrer les événements `Transfer` dont l'expéditeur est `0xAddressA`, `0xAddressB`, `0xAddressC`. +- `topic2` est configuré pour filtrer les événements `Transfer` où `0xAddressB` et `0xAddressC` sont les destinataires. - Le subgraph indexera les transactions qui se produisent dans les deux sens entre plusieurs adresses, permettant une surveillance complète des interactions impliquant toutes les adresses. ## Déclaration eth_call > Remarque : Il s'agit d'une fonctionnalité expérimentale qui n'est pas encore disponible dans une version stable de Graph Node. Vous ne pouvez l'utiliser que dans Subgraph Studio ou sur votre nœud auto-hébergé. -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. +Les `eth_calls' déclaratifs sont une caractéristique précieuse des subgraphs qui permet aux `eth_calls' d'être exécutés à l'avance, ce qui permet à `graph-node` de les exécuter en parallèle. Cette fonctionnalité permet de : @@ -460,11 +468,11 @@ Cette fonctionnalité permet de : ### Concepts clés -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. +- Les `eth_calls` déclaratifs : Appels Ethereum qui sont définis pour être exécutés en parallèle plutôt que séquentiellement. - Exécution en parallèle : Au lieu d'attendre la fin d'un appel avant de commencer le suivant, plusieurs appels peuvent être initiés simultanément. - Efficacité temporelle : Le temps total nécessaire pour tous les appels passe de la somme des temps d'appel individuels (séquentiels) au temps pris par l'appel le plus long (parallèle). -#### Scenario without Declarative `eth_calls` +#### Scénario sans `eth_calls` déclaratifs Imaginez que vous ayez un subgraph qui doit effectuer trois appels Ethereum pour récupérer des données sur les transactions, le solde et les avoirs en jetons d'un utilisateur. @@ -476,7 +484,7 @@ Traditionnellement, ces appels pourraient être effectués de manière séquenti Temps total pris = 3 + 2 + 4 = 9 secondes -#### Scenario with Declarative `eth_calls` +#### Scénario avec `eth_calls` déclaratif Avec cette fonctionnalité, vous pouvez déclarer que ces appels soient exécutés en parallèle : @@ -496,9 +504,9 @@ Temps total pris = max (3, 2, 4) = 4 secondes #### Exemple de configuration dans le manifeste du subgraph -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. +Les `eth_calls` déclarés peuvent accéder à l'adresse `event.address` de l'événement sous-jacent ainsi qu'à tous les paramètres `event.params`. -`Subgraph.yaml` using `event.address`: +`Subgraph.yaml` utilisant `event.address` : ```yaml eventHandlers: @@ -511,12 +519,12 @@ calls: Détails pour l'exemple ci-dessus : -- `global0X128` is the declared `eth_call`. 
-- The text (`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. +- `global0X128` est le nom déclaré de `eth_call`. +- Le texte (`global0X128`) est le label de ce `eth_call` qui est utilisé lors de la journalisation des erreurs. +- Le texte (`Pool[event.address].feeGrowthGlobal0X128()`) est le `eth_call` réel qui sera exécuté, et est sous la forme de `Contract[address].function(arguments)` +- L'adresse et les arguments peuvent être remplacés par des variables qui seront disponibles lorsque le gestionnaire sera exécuté. -`Subgraph.yaml` using `event.params` +`Subgraph.yaml` utilisant `event.params` ```yaml calls: @@ -525,11 +533,11 @@ calls: ### Greffe sur des subgraphs existants -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). +> **Note:** il n'est pas recommandé d'utiliser le greffage lors de l'upgrade initial vers The Graph Network. Pour en savoir plus [ici](/subgraphs/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +Lorsqu'un subgraph est déployé pour la première fois, il commence à indexer les événements au bloc de initial de la blockchain correspondante (ou au `startBlock` défini avec chaque source de données). Dans certaines circonstances, il est avantageux de réutiliser les données d'un subgraph existant et de commencer l'indexation à un bloc beaucoup plus tardif. Ce mode d'indexation est appelé _Grafting_. Le greffage (grafting) est, par exemple, utile pendant le développement pour surmonter rapidement de simples erreurs dans les mappages ou pour faire fonctionner temporairement un subgraph existant après qu'il ait échoué. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +Un subgraph est greffé sur un subgraph de base lorsque le manifeste du subgraph dans `subgraph.yaml` contient un bloc `graft` au niveau supérieur : ```yaml description: ... @@ -538,18 +546,18 @@ graft: block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. 
+Lorsqu'un subgraph dont le manifeste contient un bloc `graft` est déployé, Graph Node copiera les données du subgraph `de base` jusqu'au bloc spécifié inclus, puis continuera à indexer le nouveau subgraph à partir de ce bloc. Le subgraph de base doit exister sur l'instance cible de Graph Node et doit avoir indexé au moins jusqu'au bloc spécifié. En raison de cette restriction, le greffage ne doit être utilisé que pendant le développement ou en cas d'urgence pour accélérer la production d'un subgraph équivalent non greffé. Étant donné que le greffage copie plutôt que l'indexation des données de base, il est beaucoup plus rapide d'amener le susgraph dans le bloc souhaité que l'indexation à partir de zéro, bien que la copie initiale des données puisse encore prendre plusieurs heures pour de très gros subgraphs. Pendant l'initialisation du subgraph greffé, le nœud graphique enregistrera des informations sur les types d'entités qui ont déjà été copiés. -Le subgraph greffé peut utiliser un schéma GraphQL qui n'est pas identique à celui du subgraph de base, mais simplement compatible avec celui-ci. Il doit s'agir d'un schéma de subgraph valide à part entière, mais il peut s'écarter du schéma du subgraph de base des manières suivantes : +Le subgraph greffé peut utiliser un schema GraphQL qui n'est pas identique à celui du subgraph de base, mais simplement compatible avec lui. Il doit s'agir d'un schema de subgraph valide en tant que tel, mais il peut s'écarter du schema du subgraph de base de la manière suivante : -- Il ajoute ou supprime des types d'entités -- Il supprime les attributs des types d'entités +- Il ajoute ou supprime des types d'entité +- Il supprime les attributs des types d'entité - Il ajoute des attributs nullables aux types d'entités -- Il transforme les attributs non nullables en attributs nullables -- Il ajoute des valeurs aux énumérations +- Il transforme les attributs non nullables en attributs nuls +- Cela ajoute des valeurs aux énumérations - Il ajoute ou supprime des interfaces - Cela change pour quels types d'entités une interface est implémentée -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Gestion des fonctionnalités](#experimental-features):** `grafting` doit être déclaré sous `features` dans le manifeste du subgraph. From 3f108dcf38c41397602b769650df598ecba626e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:47 -0500 Subject: [PATCH 0134/1534] New translations advanced.mdx (Spanish) --- .../developing/creating/advanced.mdx | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/advanced.mdx b/website/src/pages/es/subgraphs/developing/creating/advanced.mdx index e5c406933622..63cf8f312906 100644 --- a/website/src/pages/es/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Descripción @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. 
Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Errores no fatales @@ -272,7 +280,7 @@ Ahora puedes crear fuentes de datos de archivos durante la ejecución de handler - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). 
+For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). From aec6967c820d72527c574688cb0bc4176984b9f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:48 -0500 Subject: [PATCH 0135/1534] New translations advanced.mdx (Arabic) --- .../developing/creating/advanced.mdx | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ar/subgraphs/developing/creating/advanced.mdx index 243ad181d478..d0f9bb2cc348 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## نظره عامة @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. 
Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## أخطاء غير فادحة @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -546,10 +554,10 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o - يضيف أو يزيل أنواع الكيانات - يزيل الصفات من أنواع الكيانات -- يضيف صفات nullable لأنواع الكيانات -- يحول صفات non-nullable إلى صفات nullable -- يضيف قيما إلى enums -- يضيف أو يزيل الواجهات +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces - يغير للكيانات التي يتم تنفيذ الواجهة لها > **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. 
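To make the compatibility rules above concrete, here is a hypothetical before/after sketch (entity and field names are invented). The grafted schema only adds a nullable attribute, which is one of the allowed deviations:

```graphql
# Base subgraph schema
type Token @entity {
  id: ID!
  owner: Bytes!
}

# Grafted subgraph schema: still compatible, because the only change
# is a new nullable attribute
type Token @entity {
  id: ID!
  owner: Bytes!
  lastTransferredAt: BigInt # nullable, so rows copied from the base remain valid
}
```

A non-nullable addition, by contrast, would not validate against the data copied from the base subgraph.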
From 116e3af53af0f074f47ca5ab630a39a533c07dee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:49 -0500 Subject: [PATCH 0136/1534] New translations advanced.mdx (Czech) --- .../developing/creating/advanced.mdx | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/advanced.mdx b/website/src/pages/cs/subgraphs/developing/creating/advanced.mdx index 4bd34e17c1c1..a8641a40068b 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Přehled @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). 
+ +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Nefatální @@ -238,7 +246,7 @@ The CID of the file as a readable string can be accessed via the `dataSource` as const cid = dataSource.stringParam() ``` -Příklad +Příklad ```typescript import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' @@ -272,7 +280,7 @@ Nyní můžete vytvářet zdroje dat souborů během provádění obslužných z - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -542,13 +550,13 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node Protože se při roubování základní data spíše kopírují než indexují, je mnohem rychlejší dostat podgraf do požadovaného bloku než při indexování od nuly, i když počáteční kopírování dat může u velmi velkých podgrafů trvat i několik hodin. Během inicializace roubovaného podgrafu bude uzel Graf Uzel zaznamenávat informace o typů entit, které již byly zkopírovány. -Roubované podgraf může používat schéma GraphQL, které není totožné se schématem základního podgrafu, ale je s ním pouze kompatibilní. Musí to být platné schéma podgrafu jako takové, ale může se od schématu základního podgrafu odchýlit následujícími způsoby: +Štěpovaný podgraf může používat schéma GraphQL, které není totožné se schématem základního podgrafu, ale je s ním pouze kompatibilní. 
Musí to být platné schéma podgrafu jako takové, ale může se od schématu základního podgrafu odchýlit následujícími způsoby: - Přidává nebo odebírá typy entit - Odstraňuje atributy z typů entit - Přidává nulovatelné atributy k typům entit - Mění nenulovatelné atributy na nulovatelné atributy -- Přidává hodnoty de enums +- Přidává hodnoty do enums - Přidává nebo odebírá rozhraní - Mění se, pro které typy entit je rozhraní implementováno From fda8f2d2a65b53fc03a0a346def70a2fc3a23828 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:50 -0500 Subject: [PATCH 0137/1534] New translations advanced.mdx (German) --- .../developing/creating/advanced.mdx | 34 ++++++++++++------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/advanced.mdx b/website/src/pages/de/subgraphs/developing/creating/advanced.mdx index da0735e7f7d5..1a8debdf98c5 100644 --- a/website/src/pages/de/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/advanced.mdx @@ -1,8 +1,8 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- -## Overview +## Überblick Add and implement advanced subgraph features to enhanced your subgraph's built. @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). 
Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Non-fatal errors @@ -141,7 +149,7 @@ File data sources are a new subgraph functionality for accessing off-chain data > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. -### Overview +### Überblick Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). 
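Editor's note: the German patch above ends on the file data source instructions — import the template from the auto-generated `templates` module and call `TemplateName.create(cid)` from a mapping. A trimmed AssemblyScript sketch of that flow, adapted from the crypto coven example that appears verbatim in the Swedish patch further down; the `Transfer` event import is a hypothetical codegen binding, while the directory CID and the `templates` import are taken from the docs' own example:

```typescript
// Hypothetical event binding produced by `graph codegen`
import { Transfer as TransferEvent } from '../generated/Contract/Contract'
import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates'

// Directory CID from the crypto coven example used elsewhere in these patches
const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm'

export function handleTransfer(event: TransferEvent): void {
  // Spawning a file data source is non-blocking: the chain handler returns
  // immediately, and the file is fetched (with retries) and processed
  // asynchronously by the template's own dedicated handler.
  const cid = ipfshash + '/' + event.params.tokenId.toString() + '.json'
  TokenMetadataTemplate.create(cid)
}
```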
From ee838001c9b70c865735834ca91da7f0e099a1e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:52 -0500 Subject: [PATCH 0138/1534] New translations advanced.mdx (Italian) --- .../developing/creating/advanced.mdx | 46 +++++++++++-------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/advanced.mdx b/website/src/pages/it/subgraphs/developing/creating/advanced.mdx index b3bf69928664..94c7d1f0d42d 100644 --- a/website/src/pages/it/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Panoramica @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). 
+ +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Errori non fatali @@ -272,7 +280,7 @@ export function handleMetadata(content: Bytes): void { - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -542,14 +550,14 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node Poiché l'innesto copia piuttosto che indicizzare i dati di base, è molto più veloce portare il subgraph al blocco desiderato rispetto all'indicizzazione da zero, anche se la copia iniziale dei dati può richiedere diverse ore per subgraph molto grandi. Mentre il subgraph innestato viene inizializzato, il Graph Node registra le informazioni sui tipi di entità già copiati. -Il grafted subgraph può utilizzare uno schema GraphQL non identico a quello del subgraph di base, ma semplicemente compatibile con esso. Deve essere uno schema di subgraph valido di per sé, ma può discostarsi dallo schema del subgraph di base nei seguenti modi: +The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: -- Aggiunge o rimuove i tipi di entità -- Rimuove gli attributi dai tipi di entità -- Aggiunge attributi annullabili ai tipi di entità -- Trasforma gli attributi non nulli in attributi nulli -- Aggiunge valori agli enum -- Aggiunge o rimuove le interfacce -- Cambia per quali tipi di entità viene implementata un'interfaccia +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces +- It changes for which entity types an interface is implemented > **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. From 1beb59a05819ba52f9479034cf0bf13edc7f3421 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:53 -0500 Subject: [PATCH 0139/1534] New translations advanced.mdx (Japanese) --- .../developing/creating/advanced.mdx | 32 ++++++++++++------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ja/subgraphs/developing/creating/advanced.mdx index cd0d39a6d81e..b6269f49fcf5 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## 概要 @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. 
+These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## 致命的でないエラー @@ -272,7 +280,7 @@ export function handleMetadata(content: Bytes): void { - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). 
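Editor's note: the hunk above stops at the paragraph on fetching files from IPFS and Arweave gateways. For reference, the handler side that several of these diffs allude to — `handleMetadata(content: Bytes)` together with `dataSource.stringParam()` — looks roughly like the sketch below. The `TokenMetadata` entity and its `name` field are assumptions standing in for whatever the subgraph's schema actually declares; the `json`/`dataSource` imports and the CID-as-id convention are taken from the example fragments in the Czech patch earlier in this series.

```typescript
import { json, Bytes, dataSource } from '@graphprotocol/graph-ts'
// Assumed entity with a nullable `name` field; defined in the subgraph schema
import { TokenMetadata } from '../generated/schema'

export function handleMetadata(content: Bytes): void {
  // Inside a file data source handler, dataSource.stringParam() returns the
  // CID of the file being processed; using it as the entity id ties the
  // stored metadata back to the file that produced it.
  let metadata = new TokenMetadata(dataSource.stringParam())
  const value = json.fromBytes(content).toObject()
  if (value) {
    const name = value.get('name')
    if (name) {
      metadata.name = name.toString()
    }
    metadata.save()
  }
}
```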
@@ -546,7 +554,7 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node - エンティティタイプを追加または削除する - エンティティタイプから属性を削除する -- エンティティタイプに nullable 属性を追加する +- 属性を追エンティティタイプに nullable加する - null 化できない属性を null 化できる属性に変更する - enums に値を追加する - インターフェースの追加または削除 From e96d62a61e12f4dec0bb62ec7ace8c08992b7367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:54 -0500 Subject: [PATCH 0140/1534] New translations advanced.mdx (Korean) --- .../developing/creating/advanced.mdx | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ko/subgraphs/developing/creating/advanced.mdx index bc3a9a86efb6..ee9918f5f254 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Overview @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. 
Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Non-fatal errors @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). From 92ff7d11f3543dfa636a43fa53dfc886031db5a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:55 -0500 Subject: [PATCH 0141/1534] New translations advanced.mdx (Dutch) --- .../developing/creating/advanced.mdx | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/advanced.mdx b/website/src/pages/nl/subgraphs/developing/creating/advanced.mdx index bc3a9a86efb6..ee9918f5f254 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Overview @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. 
+ +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Non-fatal errors @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). 
For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). From 10c3b93ed26c52fd3c57ffcbe7b05189910280e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:56 -0500 Subject: [PATCH 0142/1534] New translations advanced.mdx (Polish) --- .../developing/creating/advanced.mdx | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/advanced.mdx b/website/src/pages/pl/subgraphs/developing/creating/advanced.mdx index bc3a9a86efb6..ee9918f5f254 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Overview @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). 
Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Non-fatal errors @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). From 96f8758b3a9258120f37a0bbce0e9556813909f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:57 -0500 Subject: [PATCH 0143/1534] New translations advanced.mdx (Portuguese) --- .../developing/creating/advanced.mdx | 34 ++++++++++++------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/advanced.mdx b/website/src/pages/pt/subgraphs/developing/creating/advanced.mdx index fc407f4281c0..5dfeb1034a5f 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Visão geral @@ -29,9 +29,13 @@ dataSources: ... ## Séries de Tempo e Agregações -Séries de tempo e agregações permitem que o seu subgraph registre estatísticas como médias diárias de preço, total de transferências por hora, etc. +Prerequisites: -Este recurso introduz dois novos tipos de entidade de subgraph. Entidades de série de tempo registram pontos de dados com marcações de tempo. 
Entidades de agregação realizam cálculos pré-declarados nos pontos de dados de Séries de Tempo numa base por hora ou diária, e depois armazenam os resultados para acesso fácil via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Exemplo de Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Definição de Série de Tempo e Agregações +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Intervalos de Agregação Disponíveis @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Nota: - -Para utilizar Séries de Tempo e Agregações, um subgraph deve ter uma versão de especificação maior que 1.1.0. Note que este recurso pode passar por mudanças significativas que podem afetar a retrocompatibilidade. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Erros não-fatais @@ -272,7 +280,7 @@ Agora pode criar fontes de dados de arquivos durante a execução de handlers ba - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. 
`bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -545,11 +553,11 @@ Como o enxerto copia em vez de indexar dados base, dirigir o subgraph para o blo O subgraph enxertado pode usar um schema GraphQL que não é idêntico ao schema do subgraph base, mas é apenas compatível com ele. Ele deve ser um schema válido no seu próprio mérito, mas pode desviar do schema do subgraph base nas seguintes maneiras: - Ele adiciona ou remove tipos de entidade -- Ele retira atributos de tipos de identidade +- Ele retira atributos de tipos de entidade - Ele adiciona atributos anuláveis a tipos de entidade - Ele transforma atributos não anuláveis em atributos anuláveis - Ele adiciona valores a enums - Ele adiciona ou remove interfaces -- Ele muda para quais tipos de entidades uma interface é implementada +- Ele muda os tipos de entidades para qual implementar uma interface > **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. From 06721f8835993c830de9d05d1ef5139e9186c1dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:45:58 -0500 Subject: [PATCH 0144/1534] New translations advanced.mdx (Russian) --- .../developing/creating/advanced.mdx | 40 +++++++++++-------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ru/subgraphs/developing/creating/advanced.mdx index 568ab172dcc4..a264671c393e 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Обзор @@ -29,9 +29,13 @@ dataSources: ... ## Тайм-серии и агрегации -Тайм-серии и агрегации позволяют Вашему субграфу отслеживать такие статистические данные, как средняя цена за день, общий объем переводов за час и т. д. +Prerequisites: -Эта функция представляет два новых типа объектов субграфов. Объекты тайм-серий записывают точки данных с временными метками. Объекты агрегирования выполняют заранее объявленные вычисления над точками данных тайм-серий ежечасно или ежедневно, а затем сохраняют результаты для быстрого доступа через GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. 
### Пример схемы @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Определение тайм-серий и агрегаций +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Доступные интервалы агрегации @@ -81,13 +93,9 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Примечание: - -Чтобы использовать тайм-серии и агрегации, субграф должен иметь версию спецификации ≥1.1.0. Обратите внимание, что эта функция может претерпеть значительные изменения, которые могут повлиять на обратную совместимость. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. -## Неисправимые ошибки +## Нефатальные ошибки Ошибки индексирования в уже синхронизированных субграфах по умолчанию приведут к сбою субграфа и прекращению синхронизации. В качестве альтернативы субграфы можно настроить на продолжение синхронизации при наличии ошибок, игнорируя изменения, внесенные обработчиком, который спровоцировал ошибку. Это дает авторам субграфов время на исправление своих субграфов, в то время как запросы к последнему блоку продолжают обрабатываться, хотя результаты могут быть противоречивыми из-за бага, вызвавшего ошибку. Обратите внимание на то, что некоторые ошибки всё равно всегда будут фатальны. Чтобы быть нефатальной, ошибка должна быть детерминированной. @@ -272,7 +280,7 @@ export function handleMetadata(content: Bytes): void { - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. 
`bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -542,14 +550,14 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node Поскольку графтинг копирует, а не индексирует базовые данные, гораздо быстрее перенести субграф в нужный блок, чем индексировать с нуля, хотя для очень больших субграфов копирование исходных данных может занять несколько часов. Пока графтовый субграф инициализируется, узел The Graph будет регистрировать информацию о типах объектов, которые уже были скопированы. -Графтовый субграф может использовать схему GraphQL, которая не идентична схеме базового субграфа, а просто совместима с ней. Она сама по себе должна быть допустимой схемой субграфа, но может отличаться от схемы базового субграфа следующими способами: +Перенесённый субграф может использовать схему GraphQL, которая не идентична схеме базового субграфа, а просто совместима с ней. Это должна быть автономно действующая схема субграфа, но она может отличаться от схемы базового субграфа следующим образом: - Она добавляет или удаляет типы объектов - Она удаляет атрибуты из типов объектов -- Она добавляет в типы объектов атрибуты с возможностью обнуления -- Она превращает ненулевые атрибуты в нулевые +- Она добавляет обнуляемые атрибуты к типам объектов +- Она превращает необнуляемые атрибуты в обнуляемые - Она добавляет значения в перечисления - Она добавляет или удаляет интерфейсы -- Она изменяется в зависимости от того, для каких типов объектов реализован тот или иной интерфейс +- Она изменяется в зависимости от того, под какой тип объектов реализован интерфейс > **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. From 1c3bc2d54c422926fa5d3bc9c5f9bbd64ecb3360 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:00 -0500 Subject: [PATCH 0145/1534] New translations advanced.mdx (Swedish) --- .../developing/creating/advanced.mdx | 66 +++++++++++-------- 1 file changed, 37 insertions(+), 29 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/advanced.mdx b/website/src/pages/sv/subgraphs/developing/creating/advanced.mdx index 9831b7e5436e..11a2aa1e6a73 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Översikt @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. 
Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Icke dödliga fel @@ -272,36 +280,36 @@ Nu kan du skapa filbaserade datakällor under utförandet av kedjebaserade hante - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. 
`bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). Exempel: ```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' +import { TokenMetadata as TokenMetadataTemplate } from "../generated/templates"; -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' +const ipfshash = "QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm"; //Denna exempelkod är för en undergraf för kryptosamverkan. Ovanstående ipfs-hash är en katalog med tokenmetadata för alla kryptosamverkande NFT:er. export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) + let token = Token.load(event.params.tokenId.toString()); if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId + token = new Token(event.params.tokenId.toString()); + token.tokenID = event.params.tokenId; - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI + token.tokenURI = "/" + event.params.tokenId.toString() + ".json"; + const tokenIpfsHash = ipfshash + token.tokenURI; //Detta skapar en sökväg till metadata för en enskild Crypto coven NFT. Den konkaterar katalogen med "/" + filnamn + ".json" - token.ipfsURI = tokenIpfsHash + token.ipfsURI = tokenIpfsHash; - TokenMetadataTemplate.create(tokenIpfsHash) + TokenMetadataTemplate.create(tokenIpfsHash); } - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() + token.updatedAtTimestamp = event.block.timestamp; + token.owner = event.params.to.toHexString(); + token.save(); } ``` @@ -542,14 +550,14 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node Eftersom ympning kopierar data istället för att indexera basdata går det mycket snabbare att få delgrafen till det önskade blocket än att indexera från början, även om den initiala datorkopieringen fortfarande kan ta flera timmar för mycket stora delgrafer. Medan den ympade delgrafen initialiseras kommer Graph Node att logga information om de entitetstyper som redan har kopierats. -Den ympade delgrafen kan använda ett GraphQL-schema som inte är identiskt med basdelgrafens, men bara kompatibelt med den. Det måste vara ett giltigt delgrafschema i sig själv, men kan avvika från basdelgrafens schema på följande sätt: +Den ympade subgrafen kan använda ett GraphQL-schema som inte är identiskt med det i bas subgrafen, utan bara är kompatibelt med det. 
Det måste vara ett giltigt subgraf schema i sig, men kan avvika från bas undergrafens schema på följande sätt: - Den lägger till eller tar bort entitetstyper -- Den tar bort attribut från entitetstyper -- Den lägger till nollställbara attribut till entitetstyper -- Den gör icke-nollställbara attribut till nollställbara attribut -- Den lägger till värden till enum +- Det tar bort attribut från entitetstyper +- Det tar bort attribut från entitetstyper +- Det förvandlar icke-nullbara attribut till nullbara attribut +- Det lägger till värden till enums - Den lägger till eller tar bort gränssnitt -- Den ändrar vilka entitetstyper som ett gränssnitt är implementerat för +- Det ändrar för vilka entitetstyper ett gränssnitt implementeras > **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. From 5dc6b72f30a5a95ba240f62339046d79ce143524 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:01 -0500 Subject: [PATCH 0146/1534] New translations advanced.mdx (Turkish) --- .../developing/creating/advanced.mdx | 32 ++++++++++++------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/advanced.mdx b/website/src/pages/tr/subgraphs/developing/creating/advanced.mdx index 02697b27d494..980b0069c3e9 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Genel Bakış @@ -29,9 +29,13 @@ dataSources: ... ## Zaman Serileri ve Toplulaştırmalar -Zaman serileri ve toplulaştırmalar, subgraph'inizin günlük ortalama fiyat, saatlik toplam transferler gibi istatistikleri takip etmesini sağlar. +Prerequisites: -Bu özellik, iki yeni subgraph varlık türünü tanıtır. Zaman Serisi varlıkları, zaman damgaları ile veri noktalarını kaydeder. Toplulaştırma varlıkları ise saatlik veya günlük bazda Zaman Serisi veri noktaları üzerinde önceden belirlenmiş hesaplamalar yapar ve sonuçları GraphQL üzerinden kolay erişim için saklar. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Örnek Şema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Zaman Serileri ve Toplulaştırmaları Tanımlama +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. 
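As a minimal sketch tying those requirements to the example schema above (comments are illustrative; the aggregations documentation describes `id` and `timestamp` as being assigned by Graph Node when a timeseries entity is saved):

```graphql
type Data @entity(timeseries: true) {
  id: Int8! # unique row ID, assigned automatically
  timestamp: Timestamp! # when the data point was recorded
  price: BigDecimal! # the raw value that aggregations will read
}
```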
+These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Mevcut Toplulaştırma Aralıkları @@ -81,13 +93,9 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Not: - -Zaman Serisi ve Toplulaştırmalar özelliklerini kullanmak için, subgraph’inizin spec sürümü ≥1.1.0 olmalıdır. Bu özelliğin, geriye dönük uyumluluğu etkileyebilecek önemli değişikliklerden geçebileceğini unutmayın. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. -## Kritik olmayan hatalar +## Ölümcül Olmayan Hatalar Halihazırda senkronize edilmiş subgraphlarda indeksleme hataları varsayılan olarak subgraph başarısız olmasına ve senkronizasyonun durmasına neden olur. Hatalara rağmen senkronizasyonun devam etmesi için subgraphlar, hata tetikleyen işleyicinin yapılan değişikliklerini yok sayarak yapılandırılabilir. Bu, subgraph yazarlarının subgraphlarını düzeltmeleri için zaman kazandırırken, sorguların en son blokta sunulmaya devam etmesini sağlar, ancak hata nedeniyle sonuçlar tutarsız olabilir. Bazı hatalar hala her zaman ölümcül olacaktır. Ölümcül olmaması için hatanın belirlenmiş olması gerekmektedir. @@ -272,7 +280,7 @@ Artık zincir tabanlı işleyicilerin yürütülmesi sırasında dosya veri kayn - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). 
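As a sketch of that call inside a mapping, reusing the `TokenMetadataTemplate` from the example elsewhere on this page (the handler name is hypothetical; the CID is the sample directory identifier quoted above):

```typescript
import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates'

export function handleSomeEvent(): void {
  // a v1 directory CID with a file path, as supported by Graph Node
  const cid = 'bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json'
  TokenMetadataTemplate.create(cid)
}
```

For Arweave, the same `create()` call would be passed a transaction ID instead of an IPFS CID.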
From 0a7b1bc7da3ab35a74cbf20be7226a186820a65f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:02 -0500 Subject: [PATCH 0147/1534] New translations advanced.mdx (Ukrainian) --- .../developing/creating/advanced.mdx | 46 +++++++++++-------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/advanced.mdx b/website/src/pages/uk/subgraphs/developing/creating/advanced.mdx index bc3a9a86efb6..7614511a5617 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Overview @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). 
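For illustration, the `arg` of an `@aggregate` can also be an arithmetic expression over the timeseries fields — a sketch, assuming the raw `Data` timeseries carried an `amount` field in addition to `price`:

```graphql
type VolumeStats @aggregation(intervals: ["hour", "day"], source: "Data") {
  id: Int8!
  timestamp: Timestamp!
  # sum of price * amount across each interval
  totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "price * amount")
}
```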
+ +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Non-fatal errors @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -542,14 +550,14 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +Підграф, утворений в результаті може використовувати схему GraphQL, яка не є ідентичною схемі базового підграфа, а лише сумісною з нею. 
Вона повинна бути валідною схемою підграфа сама по собі, але може відхилятися від схеми базового підграфа у такому випадку: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Додає або видаляє типи елементів +- Видаляє атрибути з типів елементів +- Додає до типів об'єктів атрибути, які можна скасувати +- Перетворює атрибути, які не можна скасувати, на атрибути, які можна скасувати +- Додає значення до переліків +- Додає або видаляє інтерфейси +- Визначає, для яких типів елементів реалізовано інтерфейс > **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. From 4ce06c895b028f659f9eb5207d442c1685c9cc1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:03 -0500 Subject: [PATCH 0148/1534] New translations advanced.mdx (Chinese Simplified) --- .../developing/creating/advanced.mdx | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/advanced.mdx b/website/src/pages/zh/subgraphs/developing/creating/advanced.mdx index aaef1e7aa989..2940048138dd 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## 概述 @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. 
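Once those buckets have been filled in, they can be read back with an interval query — a sketch against the `Stats` aggregation from the example schema (assuming `Timestamp` values are expressed in microseconds since the Unix epoch):

```graphql
{
  stats(interval: "hour", where: { timestamp_gt: 1704067200000000 }) {
    id
    timestamp
    sum
  }
}
```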
+These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## 非致命错误 @@ -272,7 +280,7 @@ export function handleMetadata(content: Bytes): void { - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -281,7 +289,7 @@ For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave b ```typescript import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' +const ipfshash = "QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm" //This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. 
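// Flow of the handler below, for clarity: on each Transfer the Token entity
// is created once, the per-token metadata path is derived from the directory
// hash above, and a file data source is spawned so the metadata JSON is
// fetched and processed asynchronously by the file handler.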
export function handleTransfer(event: TransferEvent): void { @@ -349,7 +357,7 @@ Handlers for File Data Sources cannot be in files which import `eth_call` contra [Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) -#### 参考 +#### References [GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) @@ -542,7 +550,7 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node 因为嫁接是拷贝而不是索引基础数据,所以子图同步到所需区块比从头开始索引要快得多,尽管对于非常大的子图,初始数据拷贝仍可能需要几个小时。 在初始化嫁接子图时,Graph 节点将记录有关已复制的实体类型的信息。 -嫁接子图可以使用一个GraphQL模式,该模式与某个基本子图不同,但仅与基本子图兼容。它本身必须是一个有效的子图模式,但是可以通过以下方式偏离基本子图的模式: +嫁接子图可以使用一个 GraphQL 模式 schema,该模式与基子图之一不同,但仅与基子图兼容。它本身必须是一个有效的子图模式,但是可以通过以下方式偏离基子图的模式: - 它添加或删除实体类型 - 它从实体类型中删除属性 From a9ba30c0cd8c0175b07f1973983ccf099d06d4eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:04 -0500 Subject: [PATCH 0149/1534] New translations advanced.mdx (Urdu (Pakistan)) --- .../developing/creating/advanced.mdx | 42 +++++++++++-------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/advanced.mdx b/website/src/pages/ur/subgraphs/developing/creating/advanced.mdx index f5233e0d3ac6..6d3c40d1e663 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## جائزہ @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. 
Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Non-fatal errors @@ -272,7 +280,7 @@ export function handleMetadata(content: Bytes): void { - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -542,14 +550,14 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node چونکہ گرافٹنگ بیس ڈیٹا کو انڈیکس کرنے کے بجائے کاپی کرتا ہے، شروع سے انڈیکس کرنے کے مقابلے میں مطلوبہ بلاک میں سب گراف حاصل کرنا بہت تیز ہے، حالانکہ ابتدائی ڈیٹا کاپی بہت بڑے سب گراف کے لیے کئی گھنٹے لگ سکتی ہے۔ جب گرافٹ شدہ سب گراف کو شروع کیا جا رہا ہے، گراف نوڈ ان ہستی کی اقسام کے بارے میں معلومات کو لاگ کرے گا جو پہلے ہی کاپی ہو چکی ہیں. 
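For reference, a manifest that grafts onto an existing deployment declares the feature and a `graft` block along these lines (the deployment ID and block number are placeholders):

```yaml
features:
  - grafting
graft:
  base: Qm... # deployment ID of the base subgraph
  block: 7345624 # block number up to which the base data is copied
```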
-گرافٹڈ سب گراف ایک GraphQL اسکیما استعمال کرسکتا ہے جو بیس سب گراف میں سے ایک سے مماثل نہیں ہے، لیکن اس کے ساتھ محض مطابقت رکھتا ہے۔ یہ اپنے طور پر ایک درست سب گراف سکیما ہونا ضروری ہے، لیکن مندرجہ ذیل طریقوں سے بنیادی سب گراف کے سکیما سے انحراف کر سکتا ہے: +گرافٹڈ سب گراف ایک گراف کیو ایل اسکیما استعمال کرسکتا ہے جو بیس سب گراف میں سے ایک سے مماثل نہیں ہے، لیکن اس کے ساتھ محض مطابقت رکھتا ہے۔ اسے اپنے طور پر ایک درست سب گراف سکیما ہونا چاہیے، لیکن درج ذیل طریقوں سے بنیادی سب گراف کے سکیما سے انحراف ہو سکتا ہے: -- یہ ہستی کی اقسام کو ڈالتا یا ہٹاتا ہے +- یہ ہستی کی اقسام کو جوڑتا یا ہٹاتا ہے - یہ ہستی کی اقسام سے صفات کو ہٹاتا ہے -- یہ ہستی کی اقسام میں nullable صفات کا اضافہ کرتا ہے -- یہ non-nullable صفات کو nullable صفات میں بدل دیتا ہے +- یہ ہستی کی قسموں میں کالعدم صفات کو شامل کرتا ہے +- یہ غیر کالعدم صفات کو کالعدم صفات میں بدل دیتا ہے - یہ enums میں اقدار کا اضافہ کرتا ہے -- یہ انٹرفیس میں اضافہ کرتا یا ہٹاتا ہے -- یہ ان ہستی کی اقسام کے لیے تبدیل ہوتا ہے جن کے لیے ایک انٹرفیس لاگو کیا جاتا ہے +- یہ انٹرفیس کو جوڑتا یا ہٹاتا ہے +- یہ تبدیل ہوتا ہے جس کے لیے ایک انٹرفیس لاگو کیا جاتا ہے > **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. From 2845f30dfa2ba38ee3bfeea0539ec109438d4cbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:06 -0500 Subject: [PATCH 0150/1534] New translations advanced.mdx (Vietnamese) --- .../developing/creating/advanced.mdx | 42 +++++++++++-------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/advanced.mdx b/website/src/pages/vi/subgraphs/developing/creating/advanced.mdx index e02c23f506c5..82d7dd120a70 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## Tổng quan @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. 
Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## Lỗi không nghiêm trọng @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). @@ -544,12 +552,12 @@ Because grafting copies rather than indexes base data, it is much quicker to get The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: -- Nó thêm hoặc xóa các loại thực thể -- Nó loại bỏ các thuộc tính khỏi các loại thực thể -- Nó thêm các thuộc tính nullable vào các loại thực thể -- Nó biến các thuộc tính không thể nullable thành các thuộc tính nullable -- Nó thêm giá trị vào enums -- Nó thêm hoặc xóa các giao diện +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces - Nó thay đổi đối với loại thực thể nào mà một giao diện được triển khai > **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. From c90a22730bd93ec5af50578c453028790669c8d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:07 -0500 Subject: [PATCH 0151/1534] New translations advanced.mdx (Marathi) --- .../developing/creating/advanced.mdx | 32 ++++++++++++------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/advanced.mdx b/website/src/pages/mr/subgraphs/developing/creating/advanced.mdx index 40e8b972fba8..c24f72030078 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## सविश्लेषण @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries and Aggregations -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. +Prerequisites: -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### Example Schema @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### Defining Timeseries and Aggregations +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. 
-Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### Available Aggregation Intervals @@ -81,10 +93,6 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - [Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. ## गैर-घातक त्रुटी @@ -272,7 +280,7 @@ You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). 
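An Arweave variant of the templated data source looks much the same as the `file/ipfs` example shown earlier on this page — only the `kind` changes (a sketch):

```yaml
templates:
  - name: TokenMetadata
    kind: file/arweave
    mapping:
      apiVersion: 0.0.7
      language: wasm/assemblyscript
      file: ./src/mapping.ts
      handler: handleMetadata
      entities:
        - TokenMetadata
      abis:
        - name: Token
          file: ./abis/Token.json
```

At runtime, `TokenMetadata.create()` is then passed an Arweave transaction ID rather than an IPFS CID.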
@@ -547,7 +555,7 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node - हे घटक प्रकार जोडते किंवा काढून टाकते - हे घटक प्रकारातील गुणधर्म काढून टाकते - हे अस्तित्व प्रकारांमध्ये रद्द करण्यायोग्य विशेषता जोडते -- हे नॉन-नलेबल अॅट्रिब्यूट्सना न्युलेबल अॅट्रिब्यूटमध्ये बदलते +- हे नॉन-नलेबल अॅट्रिब्यूट्सना न्युलेबल अॅट्रिब्यूट्समध्ये बदलते - हे enums मध्ये मूल्ये जोडते - हे इंटरफेस जोडते किंवा काढून टाकते - कोणत्या घटकासाठी इंटरफेस लागू केला जातो ते बदलते From ac95c98c0ca0b5ba40139e8beb95d1a27345cb7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:08 -0500 Subject: [PATCH 0152/1534] New translations advanced.mdx (Hindi) --- .../developing/creating/advanced.mdx | 67 +++++++++++-------- 1 file changed, 38 insertions(+), 29 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/advanced.mdx b/website/src/pages/hi/subgraphs/developing/creating/advanced.mdx index bfd12afcd2f0..631278ebe555 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/advanced.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/advanced.mdx @@ -1,5 +1,5 @@ --- -title: Advance Subgraph Features +title: Advanced Subgraph Features --- ## अवलोकन @@ -29,9 +29,13 @@ dataSources: ... ## Timeseries और Aggregations -Timeseries और aggregations आपके subgraph को दैनिक औसत मूल्य, घंटेवारी कुल स्थानांतरण आदि जैसे आंकड़े ट्रैक करने में सक्षम बनाते हैं। +Prerequisites: -यह विशेषता दो नए प्रकार की 'subgraph' इकाई पेश करती है। 'Timeseries' इकाइयाँ डेटा बिंदुओं को समय की छाप के साथ रिकॉर्ड करती हैं। 'Aggregation' इकाइयाँ 'Timeseries' डेटा बिंदुओं पर पूर्व-घोषित गणनाएँ करती हैं, जो प्रति घंटे या प्रति दिन होती हैं, फिर परिणामों को GraphQL के माध्यम से आसान पहुँच के लिए संग्रहीत करती हैं। +- Subgraph specVersion must be ≥1.1.0. + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, and more. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. ### उदाहरण स्कीमा @@ -49,11 +53,19 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -### टाइमसीरीज और एग्रीगेशन्स को परिभाषित करना +### How to Define Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in the GraphQL schema. Every timeseries entity must: + +- have a unique ID of the int8 type +- have a timestamp of the Timestamp type +- include data that will be used for calculation by aggregation entities. -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. +These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the aggregation entities. -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). 
Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. +Aggregation entities are defined with `@aggregation` in the GraphQL schema. Every aggregation entity defines the source from which it will gather data (which must be a timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). + +Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. #### उपलब्ध Aggregation अंतराल @@ -81,17 +93,13 @@ Aggregation entities are defined with `@aggregation` in schema.graphql. Every ag } ``` -नोट: - -Timeseries और Aggregations का उपयोग करने के लिए, एक subgraph का spec version ≥1.1.0 होना चाहिए। ध्यान दें कि यह सुविधा महत्वपूर्ण परिवर्तनों का सामना कर सकती है जो पीछे की संगतता को प्रभावित कर सकती है। - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. +[और पढ़ें](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) समय श्रृंखला और संक्षेपण के बारे में। ## गैर-घातक त्रुटियाँ पहले से सिंक किए गए सबग्राफ पर इंडेक्सिंग त्रुटियां, डिफ़ॉल्ट रूप से, सबग्राफ को विफल कर देंगी और सिंक करना बंद कर देंगी। सबग्राफ को वैकल्पिक रूप से त्रुटियों की उपस्थिति में समन्वयन जारी रखने के लिए कॉन्फ़िगर किया जा सकता है, हैंडलर द्वारा किए गए परिवर्तनों को अनदेखा करके त्रुटि उत्पन्न हुई। यह सबग्राफ लेखकों को अपने सबग्राफ को ठीक करने का समय देता है, जबकि नवीनतम ब्लॉक के विरुद्ध प्रश्नों को जारी रखा जाता है, हालांकि त्रुटि के कारण बग के कारण परिणाम असंगत हो सकते हैं। ध्यान दें कि कुछ त्रुटियाँ अभी भी हमेशा घातक होती हैं। गैर-घातक होने के लिए, त्रुटि नियतात्मक होने के लिए जानी जानी चाहिए। -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **ध्यान दें:** The Graph Network अभी तक गैर-घातक त्रुटियों non-fatal errors का समर्थन नहीं करता है, और डेवलपर्स को Studio के माध्यम से उस कार्यक्षमता का उपयोग करके सबग्राफ को नेटवर्क पर परिनियोजित (deploy) नहीं करना चाहिए। गैर-घातक त्रुटियों को सक्षम करने के लिए सबग्राफ मेनिफ़ेस्ट पर निम्न फ़ीचर फ़्लैग सेट करने की आवश्यकता होती है: @@ -103,7 +111,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +Queries को संभावित असंगतियों वाले डेटा को queries करने के लिए `subgraphError` आर्ग्यूमेंट के माध्यम से ऑप्ट-इन करना होगा। यह भी अनुशंसा की जाती है कि `_meta` को queries करें यह जांचने के लिए कि subgraph ने त्रुटियों को स्किप किया है या नहीं, जैसे इस उदाहरण में: ```graphql foos(first: 100, subgraphError: allow) { @@ -115,7 +123,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +यदि subgraph में कोई त्रुटि आती है, तो वह queries डेटा और एक graphql त्रुटि के साथ `"indexing_error"` संदेश लौटाएगी, जैसा कि इस उदाहरण उत्तर में दिखाया गया है: ```graphql "data": { @@ -145,15 +153,15 @@ If the subgraph encounters an error, that query will return both the data and a "लाइन" में हैंडलर कार्यान्वयन के दौरान फ़ाइलों को लाने के बजाय, यह टेम्पलेट्स को पेश करता है जिन्हें एक दिए गए फ़ाइल पहचानकर्ता के लिए नए डेटा स्रोतों के रूप में उत्पन्न किया जा सकता है। ये नए डेटा स्रोत फ़ाइलों को लाते हैं, यदि वे असफल होते हैं तो पुनः प्रयास करते हैं, और जब फ़ाइल मिलती है तो एक समर्पित हैंडलर चलाते हैं। -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. +यह [ existing data साधन templates](/developing/creating-a-subgraph/#data-source-templates) के समान है, जिन्हें नए chain-based data साधन को डायनामिक रूप से बनाने के लिए उपयोग किया जाता है। -> This replaces the existing `ipfs.cat` API +> यह मौजूदा `ipfs.cat` API को प्रतिस्थापित करता है। ### अपग्रेड गाइड -#### Update `graph-ts` and `graph-cli` +#### `graph-ts` और `graph-cli` को अपडेट करें -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 +फ़ाइल डेटा साधन के लिए graph-ts >=0.29.0 और graph-cli >=0.33.1 की आवश्यकता होती है। #### एक नया इकाई प्रकार जोड़ें जो फ़ाइलें मिलने पर अपडेट किया जाएगा @@ -202,9 +210,9 @@ type TokenMetadata @entity { यदि पैरेंट इकाई और परिणामी फ़ाइल डेटा स्रोत इकाई के बीच संबंध 1:1 है, तो सबसे सरल पैटर्न मूल इकाई को लुकअप के रूप में IPFS CID का उपयोग करके परिणामी फ़ाइल इकाई से लिंक करना है। यदि आपको अपनी नई फ़ाइल-आधारित संस्थाओं को मॉडलिंग करने में कठिनाई हो रही है, तो डिस्कॉर्ड पर संपर्क करें! -> You can use [nested filters](/subgraphs/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. +> आप [nested filters](/subgraphs/querying/graphql-api/#example-for-nested-entity-filtering) का उपयोग करके parent entities को इन nested entities के आधार पर फ़िल्टर कर सकते हैं। -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` +#### एक नया टेम्पलेटेड डेटा साधन स्रोत जोड़ें जिसमें `kind: file/ipfs` या `kind: file/arweave` हो। यह वह डेटा स्रोत है जो ब्याज की फ़ाइल की पहचान होने पर उत्पन्न होगा। @@ -224,15 +232,15 @@ templates: file: ./abis/Token.json ``` -> Currently `abis` are required, though it is not possible to call contracts from within file data sources +> वर्तमान में `abis` की आवश्यकता होती है, हालांकि फ़ाइल डेटा साधन के भीतर से अनुबंधों को कॉल contract करना संभव नहीं है। -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. +फाइल डेटा साधन को विशेष रूप से उन सभी entities प्रकारों का उल्लेख करना चाहिए जिनके साथ यह `entities`. 
के तहत इंटरएक्ट करेगा। अधिक विवरण के लिए [ limitations] (#limitations) देखें। #### फ़ाइलों को संसाधित करने के लिए एक नया हैंडलर बनाएँ -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/subgraphs/developing/creating/graph-ts/api/#json-api)). +यह handler एक `Bytes` पैरामीटर स्वीकार करना चाहिए, जो उस फ़ाइल की सामग्री होगी, जब यह पाई जाएगी, जिसे फिर से प्रोसेस किया जा सकेगा। यह अक्सर एक JSON फ़ाइल होगी, जिसे `graph-ts` हेल्पर्स के साथ प्रोसेस किया जा सकता है ([documentation](/subgraphs/developing/creating/graph-ts/api/#json-api)). -The CID of the file as a readable string can be accessed via the `dataSource` as follows: +फ़ाइल का CID एक पठनीय स्ट्रिंग के रूप में `dataSource` के माध्यम से निम्नलिखित तरीके से प्राप्त किया जा सकता है: ```typescript const cid = dataSource.stringParam() @@ -269,10 +277,10 @@ export function handleMetadata(content: Bytes): void { अब आप चेन-आधारित हैंडलर के निष्पादन के दौरान फ़ाइल डेटा स्रोत बना सकते हैं: -- Import the template from the auto-generated `templates` +- ऑटो-जनरेटेड `templates` से टेम्पलेट आयात करें। - call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). 
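Since file-based entities are regular entities once saved, they can also be used in nested filters on the parent — a sketch, assuming the parent `Token` links to `TokenMetadata` through a `metadata` field and that `TokenMetadata` has `name` and `image` fields:

```graphql
{
  tokens(where: { metadata_: { name_contains: "Witch" } }) {
    id
    metadata {
      name
      image
    }
  }
}
```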
@@ -521,6 +529,7 @@ calls: ```yaml calls: - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() + ``` ### मौजूदा सबग्राफ पर ग्राफ्टिंग @@ -542,10 +551,10 @@ When a subgraph whose manifest contains a `graft` block is deployed, Graph Node क्योंकि आधार डेटा को अनुक्रमित करने के बजाय प्रतियों को ग्राफ्ट करना, स्क्रैच से अनुक्रमणित करने की तुलना में सबग्राफ को वांछित ब्लॉक में प्राप्त करना बहुत तेज है, हालांकि बहुत बड़े सबग्राफ के लिए प्रारंभिक डेटा कॉपी में अभी भी कई घंटे लग सकते हैं। जबकि ग्राफ्टेड सबग्राफ को इनिशियलाइज़ किया जा रहा है, ग्राफ़ नोड उन एंटिटी प्रकारों के बारे में जानकारी लॉग करेगा जो पहले ही कॉपी किए जा चुके हैं। -ग्राफ्टेड सबग्राफ एक ग्राफक्यूएल स्कीमा का उपयोग कर सकता है जो बेस सबग्राफ के समान नहीं है, लेकिन इसके साथ केवल संगत है। यह अपने आप में एक मान्य सबग्राफ स्कीमा होना चाहिए, लेकिन निम्नलिखित तरीकों से बेस सबग्राफ के स्कीमा से विचलित हो सकता है: +ग्राफ्टेड सबग्राफ एक ग्राफक्यूएल स्कीमा का उपयोग कर सकता है जो बेस सबग्राफ के समान नहीं है, लेकिन इसके अनुकूल हो। यह अपने आप में एक मान्य सबग्राफ स्कीमा होना चाहिए, लेकिन निम्नलिखित तरीकों से बेस सबग्राफ के स्कीमा से विचलित हो सकता है: -- यह इकाई प्रकार जोड़ता या हटाता है -- यह इकाई प्रकारों से विशेषताएँ निकालता है +- यह इकाई के प्रकारों को जोड़ या हटा सकता है| +- यह इकाई प्रकारों में से गुणों को हटाता है| - यह प्रभावहीन गुणों को इकाई प्रकारों में जोड़ता है| - यह प्रभाव वाले गुणों को प्रभावहीन गुणों में बदल देता है| - यह इनम्स में महत्व देता है| From d40bf2049f66fe43f16800adddbec32cc4ff82e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:09 -0500 Subject: [PATCH 0153/1534] New translations api.mdx (Romanian) --- .../developing/creating/graph-ts/api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ro/subgraphs/developing/creating/graph-ts/api.mdx index 4c5ce57993f2..5cd8ab120b25 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Given an IPFS hash or path, reading a file from IPFS is done as follows: From 22a1bfb044be69d1fbf238ee41ad995110f18e02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:10 -0500 Subject: [PATCH 0154/1534] New translations api.mdx (French) --- .../developing/creating/graph-ts/api.mdx | 102 +++++++++--------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/fr/subgraphs/developing/creating/graph-ts/api.mdx index 4ec8c6d6ebb0..d784114e01cc 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,7 +2,7 @@ title: API AssemblyScript --- -> Remarque : Si vous avez créé un subgraph avant la version `graph-cli`/`graph-ts` `0.22.0`, alors vous utilisez une ancienne version d'AssemblyScript. Il est recommandé de consulter le [`Guide de Migration `] (/resources/release-notes/assemblyscript-migration-guide/). +> Note : Si vous avez créé un subgraph avant la version `graph-cli`/`graph-ts` `0.22.0`, alors vous utilisez une ancienne version d'AssemblyScript. Il est recommandé de consulter le [`Guide de Migration`](/resources/release-notes/assemblyscript-migration-guide/). Découvrez quelles APIs intégrées peuvent être utilisées lors de l'écriture des mappages de subgraph. 
Il existe deux types d'APIs disponibles par défaut : @@ -29,16 +29,16 @@ La bibliothèque `@graphprotocol/graph-ts` fournit les API suivantes : La `apiVersion` dans le manifeste du subgraph spécifie la version de l'API de mappage exécutée par Graph Node pour un subgraph donné. -| Version | Notes de version | -| :-: | --- | -| 0.0.9 | Ajout de nouvelles fonctions hôtes [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Ajout de la validation pour l'existence des champs dans le schéma lors de l'enregistrement d'une entité. | -| 0.0.7 | Ajout des classes `TransactionReceipt` et `Log`aux types Ethereum
    Ajout du champ `receipt` à l'objet Ethereum Event | -| 0.0.6 | Ajout du champ `nonce` à l'objet Ethereum Transaction
    Ajout de `baseFeePerGas` à l'objet Ethereum Block | -| 0.0.5 | AssemblyScript a été mis à niveau vers la version 0.19.10 (ceci inclut des changements importants, veuillez consulter le [`Guide de Migration`](/resources/release-notes/assemblyscript-migration-guide/))
    . `ethereum.transaction.gasUsed` est renommé en `ethereum.transaction.gasLimit` | -| 0.0.4 | Ajout du champ `functionSignature` à l'objet Ethereum SmartContractCall | -| 0.0.3 | Ajout du champ `from` à l'objet Ethereum Call
    `etherem.call.address` est renommé en `ethereum.call.to` | -| 0.0.2 | Ajout du champ `input` à l'objet Ethereum Transaction | +| Version | Notes de version | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 0.0.9 | Ajout de nouvelles fonctions hôtes [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Ajout de la validation pour l'existence des champs dans le schéma lors de l'enregistrement d'une entité. | +| 0.0.7 | Ajout des classes `TransactionReceipt` et `Log`aux types Ethereum
    Ajout du champ `receipt` à l'objet Ethereum Event | +| 0.0.6 | Ajout du champ `nonce` à l'objet Ethereum Transaction
    Ajout de `baseFeePerGas` à l'objet Ethereum Block |
| 0.0.5 | AssemblyScript a été mis à niveau vers la version 0.19.10 (cela inclut des changements brusques, veuillez consulter le [`Guide de migration`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renommé en `ethereum.transaction.gasLimit` | +| 0.0.4 | Ajout du champ `functionSignature` à l'objet Ethereum SmartContractCall | +| 0.0.3 | Ajout du champ `from` à l'objet Ethereum Call
    `ethereum.call.address` renommé en `ethereum.call.to` | +| 0.0.2 | Ajout du champ `input` à l'objet Ethereum Transaction | ### Types intégrés @@ -280,7 +280,7 @@ Comme l'entité peut ne pas encore exister dans le magasin, la méthode `load` r Depuis `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 et `@graphprotocol/graph-cli` v0.49.0 la méthode `loadInBlock` est disponible pour tous les types d'entités. -L'API store facilite la récupération des entités qui ont été créées ou mises à jour dans le bloc actuel. Une situation typique est qu'un gestionnaire crée une transaction à partir d'un événement on-chain, et qu'un gestionnaire ultérieur souhaite accéder à cette transaction si elle existe. +L'API de store facilite la récupération des entités créées ou mises à jour dans le bloc actuel. Une situation typique pour cela est qu'un gestionnaire crée une transaction à partir d'un événement onchain et qu'un gestionnaire ultérieur souhaite accéder à cette transaction si elle existe. - Dans le cas où la transaction n'existe pas, le subgraph devra interroger la base de données pour découvrir que l'entité n'existe pas. Si l'auteur du subgraph sait déjà que l'entité doit avoir été créée dans le même bloc, utiliser `loadInBlock` évite ce détour par la base de données. - Pour certains subgraphs, ces recherches infructueuses peuvent contribuer de manière significative au temps d'indexation. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Les contrats intelligents ancrent occasionnellement des fichiers IPFS sur la blockchain. Cela permet aux mappages d'obtenir les hashs IPFS du contrat et de lire les fichiers correspondants à partir d'IPFS. Les données du fichier seront retournées sous forme de `Bytes`, ce qui nécessite généralement un traitement supplémentaire, par exemple avec l'API `json` documentée plus loin sur cette page. +Les contrats intelligents ancrent parfois des fichiers IPFS onchain. Cela permet aux mappages d'obtenir les hashs IPFS du contrat et de lire les fichiers correspondants depuis IPFS. Les données du fichier seront renvoyées sous forme de `Bytes`, ce qui nécessite généralement un traitement supplémentaire, par exemple avec l'API `json` documentée plus loin sur cette page. 
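As a rough sketch of the follow-up processing mentioned above, the snippet below combines the `ipfs` and `json` APIs. The hash and the `name` field are placeholders, and `json.try_fromBytes` is used so that a malformed file fails gracefully instead of aborting the handler:

```typescript
import { ipfs, json, JSONValueKind, log } from '@graphprotocol/graph-ts'

// Illustrative helper: fetch a JSON document from IPFS and log one field.
// Call it from a mapping handler; the CID passed in is hypothetical.
function logNameFromIpfs(hash: string): void {
  const data = ipfs.cat(hash) // null if the file cannot be retrieved
  if (data === null) return
  const parsed = json.try_fromBytes(data!) // tolerates malformed JSON
  if (!parsed.isOk || parsed.value.kind != JSONValueKind.OBJECT) return
  const name = parsed.value.toObject().get('name')
  if (name !== null && name!.kind == JSONValueKind.STRING) {
    log.info('name in IPFS file: {}', [name!.toString()])
  }
}
```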
Étant donné un hachage ou un chemin IPFS, la lecture d'un fichier depuis IPFS se fait comme suit : @@ -770,44 +770,44 @@ Lorsque le type d'une valeur est certain, il peut être converti en un [type int ### Référence des conversions de types -| Source(s) | Destination | Fonctions de conversion | -| -------------------- | -------------------- | ---------------------------- | -| Address | Bytes | aucune | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | aucune | -| Bytes (signé) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (non signé) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | aucune | -| int32 | i32 | aucune | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | aucune | -| int64 - int256 | BigInt | aucune | -| uint32 - uint256 | BigInt | aucune | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Source(s) | Destination | Fonctions de conversion | +| --------------------- | -------------------- | -------------------------------- | +| Address | Bytes | aucune | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | aucune | +| Bytes (signé) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (non signé) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | aucune | +| int32 | i32 | aucune | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | aucune | +| int64 - int256 | BigInt | aucune | +| uint32 - uint256 | BigInt | aucune | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Métadonnées de la source de données From 1b2978e330eefebc4d8017083529d55fb8799add Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= 
Date: Fri, 14 Feb 2025 12:46:12 -0500 Subject: [PATCH 0155/1534] New translations api.mdx (Spanish) --- .../developing/creating/graph-ts/api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/es/subgraphs/developing/creating/graph-ts/api.mdx index 6dafeec664af..c8d2985155e0 100644 --- a/website/src/pages/es/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| Version | Notas del lanzamiento | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Notas del lanzamiento | +| :-----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Tipos Incorporados @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Dado un hash o ruta de IPFS, la lectura de un archivo desde IPFS se realiza de la siguiente manera: From dbb0b481821c972e1c409054b38e0a17aa7cdeea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:13 -0500 Subject: [PATCH 0156/1534] New translations api.mdx (Arabic) --- .../developing/creating/graph-ts/api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ar/subgraphs/developing/creating/graph-ts/api.mdx index 7c6e468b79fc..00a7c89a1454 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| الاصدار | ملاحظات الإصدار | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| الاصدار | ملاحظات الإصدار | +| :-----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### الأنواع المضمنة (Built-in) @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { 'import { ipfs } from '@graphprotocol/graph-ts ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Given an IPFS hash or path, reading a file from IPFS is done as follows: From 88415cca610a41f2093e155d8c120612e76dbc06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:14 -0500 Subject: [PATCH 0157/1534] New translations api.mdx (Czech) --- .../developing/creating/graph-ts/api.mdx | 102 +++++++++--------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/cs/subgraphs/developing/creating/graph-ts/api.mdx index 157f54abcd02..4bd1104996f9 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ Knihovna `@graphprotocol/graph-ts` poskytuje následující API: `apiVersion` v manifestu podgrafu určuje verzi mapovacího API, kterou pro daný podgraf používá uzel Graf. -| Verze | Poznámky vydání | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Přidá ověření existence polí ve schéma při ukládání entity. | -| 0.0.7 | Přidání tříd `TransactionReceipt` a `Log` do typů Ethereum
    Přidání pole `receipt` do objektu Ethereum událost | -| 0.0.6 | Přidáno pole `nonce` do objektu Ethereum Transaction
    Přidáno `baseFeePerGas` do objektu Ethereum bloku | -| 0.0.5 | AssemblyScript povýšen na verzi 0.19.10 (obsahuje rozbíjející změny, viz [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` přejmenováno na `ethereum.transaction.gasLimit` | -| 0.0.4 | Přidání pole `functionSignature` do objektu Ethereum SmartContractCall | -| 0.0.3 | Do objektu Ethereum Call přidáno pole `from`
    `etherem.call.address` přejmenováno na `ethereum.call.to` | -| 0.0.2 | Přidání pole `input` do objektu Ethereum Transackce | +| Verze | Poznámky vydání | +| :---: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Přidá ověření existence polí ve schéma při ukládání entity. | +| 0.0.7 | Přidání tříd `TransactionReceipt` a `Log` do typů Ethereum
    Přidání pole `receipt` do objektu Ethereum událost | +| 0.0.6 | Přidáno pole `nonce` do objektu Ethereum Transaction
    Přidáno `baseFeePerGas` do objektu Ethereum bloku | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Přidání pole `functionSignature` do objektu Ethereum SmartContractCall | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Přidání pole `input` do objektu Ethereum Transackce | ### Vestavěné typy @@ -147,7 +147,7 @@ _Math_ - `x.notEqual(y: BigInt): bool` –lze zapsat jako `x != y`. - `x.lt(y: BigInt): bool` – lze zapsat jako `x < y`. - `x.le(y: BigInt): bool` – lze zapsat jako `x <= y`. -- `x.gt(y: BigInt): bool` – lze zapsat jako `x > y`. +- `x.gt(y: BigInt): bool` – lze zapsat jako `x > y`. - `x.ge(y: BigInt): bool` – lze zapsat jako `x >= y`. - `x.neg(): BigInt` – lze zapsat jako `-x`. - `x.divDecimal(y: BigDecimal): BigDecimal` – dělí desetinným číslem, čímž získá desetinný výsledek. @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value Od verzí `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 a `@graphprotocol/graph-cli` v0.49.0 je metoda `loadInBlock` dostupná pro všechny typy entit. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Chytré smlouvy příležitostně ukotvují soubory IPFS v řetězci. To umožňuje mapování získat hashe IPFS ze smlouvy a načíst odpovídající soubory z IPFS. Data souborů budou vrácena jako `Bajty`, což obvykle vyžaduje další zpracování, např. pomocí API `json` zdokumentovaného později na této stránce. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
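Returning to the `loadInBlock` API discussed a few paragraphs above, the lookup pattern it enables can be sketched as follows; the `Transfer` entity and the helper name are assumptions rather than part of the original text:

```typescript
import { Transfer } from '../generated/schema' // hypothetical generated entity

// Rough sketch: consult the in-block cache first, fall back to a database
// lookup, and only create the entity when neither source has it.
function getOrCreateTransfer(id: string): Transfer {
  let transfer = Transfer.loadInBlock(id)
  if (transfer === null) {
    transfer = Transfer.load(id)
  }
  if (transfer === null) {
    transfer = new Transfer(id)
  }
  return transfer as Transfer
}
```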
Při zadání hashe nebo cesty IPFS se čtení souboru ze systému IPFS provádí následujícím způsobem: @@ -770,44 +770,44 @@ Pokud je typ hodnoty jistý, lze ji převést na [vestavěný typ](#built-in-typ ### Převody typů Reference -| Zdroj(e) | Destinace | Funkce převodu | -| ----------------------- | ----------------------- | ---------------------------- | -| Address | Bytes | none | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimální) | s.toHexString() or s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | none | -| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | Řetězec (hexadecimální) | s.toHexString() or s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | none | -| int32 | i32 | none | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | none | -| int64 - int256 | BigInt | none | -| uint32 - uint256 | BigInt | none | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| Řetězec (hexadecimální) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Zdroj(e) | Destinace | Funkce převodu | +| ---------------------- | ---------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimální) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimální) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimální) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Metadata zdroje dat From c3accd0b55f3e7d385a7c7f77bb9157cfcd64901 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:15 -0500 Subject: [PATCH 0158/1534] 
New translations api.mdx (German) --- .../developing/creating/graph-ts/api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/de/subgraphs/developing/creating/graph-ts/api.mdx index 522282ec37d7..d3118e43e306 100644 --- a/website/src/pages/de/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Given an IPFS hash or path, reading a file from IPFS is done as follows: From c660d0b45f840037e1f2181ac111c868510a2bc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:17 -0500 Subject: [PATCH 0159/1534] New translations api.mdx (Italian) --- .../developing/creating/graph-ts/api.mdx | 100 +++++++++--------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/it/subgraphs/developing/creating/graph-ts/api.mdx index 3d017c6425f6..0cb5bcc1519c 100644 --- a/website/src/pages/it/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ La libreria `@graphprotocol/graph-ts` fornisce le seguenti API: La `apiVersion` nel manifest del subgraph specifica la versione dell'API di mappatura che viene eseguita da the Graph Node per un dato subgraph. -| Versione | Note di rilascio | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Aggiunte le classi `TransactionReceipt` e `Log` ai tipi di Ethereum
    Aggiunto il campo `receipt` all'oggetto Ethereum Event | -| 0.0.6 | Aggiunto il campo `nonce` all'oggetto Ethereum Transaction
    Aggiunto `baseFeePerGas` all'oggetto Ethereum Block | -| 0.0.5 | AssemblyScript aggiornato alla versione 0.19.10 (questo include modifiche di rottura, consultare la [`Guida alla migrazione`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` rinominato in `ethereum.transaction.gasLimit` | -| 0.0.4 | Aggiunto il campo `functionSignature` all'oggetto Ethereum SmartContractCall | -| 0.0.3 | Aggiunto il campo `from` all'oggetto Ethereum Call
    `etherem.call.address` rinominato in `ethereum.call.to` | -| 0.0.2 | Aggiunto il campo `input` all'oggetto Ethereum Transaction | +| Versione | Note di rilascio | +| :------: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Aggiunte le classi `TransactionReceipt` e `Log` ai tipi di Ethereum
    Aggiunto il campo `receipt` all'oggetto Ethereum Event | +| 0.0.6 | Aggiunto il campo `nonce` all'oggetto Ethereum Transaction
    Aggiunto `baseFeePerGas` all'oggetto Ethereum Block | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Aggiunto il campo `functionSignature` all'oggetto Ethereum SmartContractCall | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Aggiunto il campo `input` all'oggetto Ethereum Transaction | ### Tipi integrati @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value A partire da `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 e `@graphprotocol/graph-cli` v0.49.0 il metodo `loadInBlock` è disponibile per tutti i tipi di entità. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Gli smart contract di tanto in tanto ancorano i file IPFS sulla chain. Ciò consente alle mappature di ottenere gli hash IPFS dal contratto e di leggere i file corrispondenti da IPFS. I dati del file saranno restituiti come `Byte`, che di solito richiedono un'ulteriore elaborazione, ad esempio con l'API `json` documentata più avanti in questa pagina. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
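The 0.0.9 row in the release-notes tables above lists the `eth_get_balance` and `hasCode` host functions; a minimal sketch of calling them, with a placeholder address, might read:

```typescript
import { Address, ethereum, log } from '@graphprotocol/graph-ts'

// Illustrative only; requires apiVersion 0.0.9 or later.
function inspectAccount(): void {
  const account = Address.fromString('0x0000000000000000000000000000000000000000')
  const balance = ethereum.getBalance(account) // BigInt, denominated in wei
  const isContract = ethereum.hasCode(account).inner // false for an EOA
  log.info('balance: {}, is contract: {}', [balance.toString(), isContract.toString()])
}
```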
Dato un hash o un percorso IPFS, la lettura di un file da IPFS avviene come segue: @@ -770,44 +770,44 @@ Quando il tipo di un valore è certo, può essere convertito in un [tipo incorpo ### Riferimento alle conversioni di tipo -| Fonte(i) | Destinazione | Funzione di conversione | -| -------------------- | -------------------- | --------------------------- | -| Address | Bytes | none | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() o s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | none | -| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() o s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | none | -| int32 | i32 | none | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | none | -| int64 - int256 | BigInt | none | -| uint32 - uint256 | BigInt | none | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Fonte(i) | Destinazione | Funzione di conversione | +| -------------------- | --------------------- | -------------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() o s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() o s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Metadati della Data Source From 924284fc21a268fcf5cdcd4e8642e995f0dab238 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:18 -0500 Subject: [PATCH 
0160/1534] New translations api.mdx (Japanese) --- .../developing/creating/graph-ts/api.mdx | 102 +++++++++--------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ja/subgraphs/developing/creating/graph-ts/api.mdx index 2a66b2ead6d6..9fdb7beadc1e 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ Since language mappings are written in AssemblyScript, it is useful to review th サブグラフマニフェストapiVersionは、特定のサブグラフのマッピングAPIバージョンを指定します。このバージョンは、Graph Nodeによって実行されます。 -| バージョン | リリースノート | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Ethereum タイプに `TransactionReceipt` と `Log` クラスを追加
    Ethereum Event オブジェクトに `receipt` フィールドを追加。 | -| 0.0.6 | Ethereum Transactionオブジェクトに`nonce`フィールドを追加
    Ethereum Blockオブジェクトに`baseFeePerGas`を追加。 | -| 0.0.5 | AssemblyScriptはバージョン0.19.10にアップグレードされました(このバージョンアップには変更点が含まれていますので Migration Guide) をご覧ください)。
    ethereum.transaction.gasUsedの名前がethereum.transaction.gasLimitに変更 | -| 0.0.4 | Ethereum SmartContractCall オブジェクトにfunctionSignatureフィールドを追加 | -| 0.0.3 | イーサリアムコールオブジェクトに`from`フィールドを追加
    `etherem.call.address`を`ethereum.call.to`に変更。 | -| 0.0.2 | Ethereum Transaction オブジェクトに inputフィールドを追加 | +| バージョン | リリースノート | +| :---: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Ethereum タイプに `TransactionReceipt` と `Log` クラスを追加
    Ethereum Event オブジェクトに `receipt` フィールドを追加。 | +| 0.0.6 | Ethereum Transactionオブジェクトに`nonce`フィールドを追加
    Ethereum Blockオブジェクトに`baseFeePerGas`を追加。 | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Ethereum SmartContractCall オブジェクトにfunctionSignatureフィールドを追加 | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` |
| 0.0.2 | Ethereum Transaction オブジェクトに inputフィールドを追加 |

### 組み込み型

@@ -280,13 +280,13 @@ As the entity may not exist in the store yet, the `load` method returns a value

graph-node v0.31.0、@graphprotocol/graph-ts v0.30.0、および @graphprotocol/graph-cli v0.49.0 以降、 loadInBlock メソッドはすべてのエンティティ タイプで使用できます。

The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists.

- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip.
- For some subgraphs, these missed lookups can contribute significantly to the indexing time.

```typescript
let id = event.transaction.hash // または ID が構築される方法
let transfer = Transfer.loadInBlock(id)
if (transfer == null) {
  transfer = new Transfer(id)
}
```

@@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void {
import { ipfs } from '@graphprotocol/graph-ts'
```

-スマートコントラクトは時折、チェーン上の IPFS ファイルをアンカリングします。 これにより、マッピングはコントラクトから IPFS ハッシュを取得し、IPFS から対応するファイルを読み取ることができます。 ファイルのデータはBytesとして返されますが、通常は、このページで後述する json API などを使ってさらに処理する必要があります。
+Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
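Besides `ipfs.cat`, the same module offers `ipfs.map`, which streams a file through a named, exported callback; in the rough sketch below the callback name, hash, and user data are placeholders:

```typescript
import { ipfs, JSONValue, Value } from '@graphprotocol/graph-ts'

// With the 'json' flag, the callback runs once per top-level JSON value in
// the file; entity writes made here are committed (and reverted on reorg)
// just like writes from the calling handler.
export function processEntry(value: JSONValue, userData: Value): void {
  const obj = value.toObject()
  // ...create or update entities from `obj` here...
}

// Illustrative trigger, to be called from within a mapping handler.
function mapMetadataFile(hash: string): void {
  ipfs.map(hash, 'processEntry', Value.fromString('parent-id'), ['json'])
}
```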
IPFS のハッシュやパスが与えられた場合、IPFS からのファイルの読み込みは以下のように行われます。 @@ -770,44 +770,44 @@ if (value.kind == JSONValueKind.BOOL) { ### タイプ 変換参照 -| Source(s) | Destination | Conversion function | -| -------------------- | -------------------- | ---------------------------- | -| Address | Bytes | none | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | none | -| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | none | -| int32 | i32 | none | -| int32 | BigInt | Bigint.fromI32(s) | -| uint24 | i32 | none | -| int64 - int256 | BigInt | none | -| uint32 - uint256 | BigInt | none | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromString(s) | -| String | BigInt | BigDecimal.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | -------------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | Bigint.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromString(s) | +| String | BigInt | BigDecimal.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### データソースのメタデータ From 6b810ebc887fe308efc3e733722bbc57366b5f7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:19 -0500 Subject: [PATCH 0161/1534] New translations api.mdx (Korean) --- .../developing/creating/graph-ts/api.mdx | 24 
+++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ko/subgraphs/developing/creating/graph-ts/api.mdx index 4c5ce57993f2..5cd8ab120b25 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Given an IPFS hash or path, reading a file from IPFS is done as follows: From bb0ca33e888b0cc614baf72d9698f4c82d2d47ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:20 -0500 Subject: [PATCH 0162/1534] New translations api.mdx (Dutch) --- .../developing/creating/graph-ts/api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/nl/subgraphs/developing/creating/graph-ts/api.mdx index 4c5ce57993f2..5cd8ab120b25 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Given an IPFS hash or path, reading a file from IPFS is done as follows: From 1355701a4857d39a61874cddf9ef048ec46d6f72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:21 -0500 Subject: [PATCH 0163/1534] New translations api.mdx (Polish) --- .../developing/creating/graph-ts/api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/pl/subgraphs/developing/creating/graph-ts/api.mdx index 4c5ce57993f2..5cd8ab120b25 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| Version | Release notes | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Release notes | +| :-----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Given an IPFS hash or path, reading a file from IPFS is done as follows: From 6d8fe7fbde544fc5fb8747344517084528cc7ca8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:23 -0500 Subject: [PATCH 0164/1534] New translations api.mdx (Portuguese) --- .../developing/creating/graph-ts/api.mdx | 103 +++++++++--------- 1 file changed, 52 insertions(+), 51 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/pt/subgraphs/developing/creating/graph-ts/api.mdx index a7521399250e..ba49a090be91 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ A biblioteca `@graphprotocol/graph-ts` fornece as seguintes APIs: No manifest do subgraph, `apiVersion` especifica a versão da API de mapeamento, executada pelo Graph Node para um subgraph. -| Versão | Notas de atualização | -| :-: | --- | -| 0.0.9 | Adiciona novas funções de host [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adiciona validação para existência de campos no schema ao salvar uma entidade. | -| 0.0.7 | Classes `TransactionReceipt` e `Log` adicionadas aos tipos do EthereumCampo
    Campo `receipt` adicionado ao objeto Ethereum Event | -| 0.0.6 | Campo `nonce` adicionado ao objeto Ethereum TransactionCampo
    `baseFeePerGas` adicionado ao objeto Ethereum Block | -| 0.0.5 | AssemblyScript atualizado à versão 0.19.10 (inclui mudanças recentes, favor ler o [`Guia de Migração`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renomeado para `ethereum.transaction.gasLimit` | -| 0.0.4 | Campo `functionSignature` adicionado ao objeto Ethereum SmartContractCall | -| 0.0.3 | Campo `from` adicionado ao objeto Ethereum
`Calletherem.call.address` renomeado para `ethereum.call.to` |
-| 0.0.2 | Campo `input` adicionado ao objeto Ethereum Transaction |
+| Versão | Notas de atualização |
+| :----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 0.0.9 | Adiciona novas funções de host [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
+| 0.0.8 | Adiciona validação para existência de campos no schema ao salvar uma entidade. |
+| 0.0.7 | Classes `TransactionReceipt` e `Log` adicionadas aos tipos do Ethereum
    Campo `receipt` adicionado ao objeto Ethereum Event |
+| 0.0.6 | Campo `nonce` adicionado ao objeto Ethereum Transaction
    Campo `baseFeePerGas` adicionado ao objeto Ethereum Block |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Campo `functionSignature` adicionado ao objeto Ethereum SmartContractCall | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Campo `input` adicionado ao objeto Ethereum Transaction | ### Tipos Embutidos @@ -166,7 +166,8 @@ _Matemática_ import { TypedMap } from '@graphprotocol/graph-ts' ``` -O `TypedMap` pode servir para armazenar pares de chave e valor (key e value ). Confira [este exemplo](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). +O `TypedMap` pode servir para armazenar pares de chave e valor (key e value +). Confira [este exemplo](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). A classe `TypedMap` tem a seguinte API: @@ -280,7 +281,7 @@ As the entity may not exist in the store yet, the `load` method returns a value Desde o `graph-node` v0.31.0, o `@graphprotocol/graph-ts` v0.30.0 e o `@graphprotocol/graph-cli v0.49.0`, o método `loadInBlock` está disponível em todos os tipos de entidade. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +673,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Contratos inteligentes ocasionalmente ancoram arquivos IPFS on-chain. Assim, os mapeamentos obtém os hashes IPFS do contrato e lêem os arquivos correspondentes do IPFS. Os dados dos arquivos serão retornados como `Bytes`, o que costuma exigir mais processamento; por ex., com a API `json` documentada mais abaixo nesta página. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
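To make the "further processing" step concrete, here is an illustrative sketch that parses a fetched file with `json.fromBytes` (the `name` field and the helper function are assumptions for the example, not part of any particular contract or schema):

```typescript
import { ipfs, json } from '@graphprotocol/graph-ts'

// Hypothetical helper: fetch a JSON file from IPFS and read one field.
export function readNameFromIpfs(hash: string): string | null {
  let data = ipfs.cat(hash)
  if (data == null) {
    return null // fetch failed or timed out
  }

  // json.fromBytes converts the raw Bytes into a JSONValue
  // (see the type conversion reference table below)
  let value = json.fromBytes(data)
  let obj = value.toObject()

  let name = obj.get('name') // 'name' is an assumed field in the file
  if (name == null) {
    return null
  }
  return name.toString()
}
```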
Considerando um hash ou local IPFS, um arquivo do IPFS é lido da seguinte maneira: @@ -770,44 +771,44 @@ Quando o tipo de um valor é confirmado, ele pode ser convertido num [tipo embut ### Referência de Conversões de Tipos -| Fonte(s) | Destino | Função de conversão | -| -------------------- | -------------------- | ---------------------------- | -| Address | Bytes | nenhum | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() ou s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | nenhum | -| Bytes (assinado) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (não assinado) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() ou s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | nenhum | -| int32 | i32 | nenhum | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | nenhum | -| int64 - int256 | BigInt | nenhum | -| uint32 - uint256 | BigInt | nenhum | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Fonte(s) | Destino | Função de conversão | +| ------------------------ | -------------------- | ------------------------------ | +| Address | Bytes | nenhum | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() ou s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | nenhum | +| Bytes (assinado) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (não assinado) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() ou s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | nenhum | +| int32 | i32 | nenhum | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | nenhum | +| int64 - int256 | BigInt | nenhum | +| uint32 - uint256 | BigInt | nenhum | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Metadados de Fontes de Dados From e4ff5e57b8256f71425afe58e6015571fc5ce450 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 
12:46:24 -0500 Subject: [PATCH 0165/1534] New translations api.mdx (Russian) --- .../developing/creating/graph-ts/api.mdx | 112 +++++++++--------- 1 file changed, 57 insertions(+), 55 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ru/subgraphs/developing/creating/graph-ts/api.mdx index 5609d9565220..727f252a9650 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/graph-ts/api.mdx @@ -2,7 +2,7 @@ title: AssemblyScript API --- -> Примечание: Если Вы создали субграф до версии `graph-cli`/`graph-ts` 0.22.0`, значит, Вы используете более старую версию AssemblyScript. Рекомендуется ознакомиться с [`Руководством по миграции\`](/resources/release-notes/assemblyscript-migration-guide/). +> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/). Узнайте, какие встроенные API можно использовать при написании мэппингов субграфов. По умолчанию доступны два типа API: @@ -29,16 +29,16 @@ title: AssemblyScript API `apiVersion` в манифесте субграфа указывает версию мэппинга API, которая запускается посредством Graph Node для данного субграфа. -| Версия | Примечания к релизу | -| :-: | --- | -| 0.0.9 | Добавлены новые функции хоста [`eth_get_balance`](#balance-of-an-address) и [`hasCode`](#check-if-an-address-a-contract-or-eoa) | -| 0.0.8 | Добавлена проверка наличия полей в схеме при сохранении объекта. | -| 0.0.7 | К типам Ethereum добавлены классы `TransactionReceipt` и `Log`
    К объекту Ethereum Event добавлено поле `receipt` | -| 0.0.6 | В объект Ethereum Transaction добавлено поле `nonce`
    В объект Ethereum Block добавлено поле `baseFeePerGas` | -| 0.0.5 | AssemblyScript обновлен до версии 0.19.10 (включая критические изменения, см. [`Руководство по миграции`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` переименован в `ethereum.transaction.gasLimit` | -| 0.0.4 | В объект Ethereum SmartContractCall добавлено поле `functionSignature` | -| 0.0.3 | В объект Ethereum Call добавлено поле `from`
    `etherem.call.address` переименован в ethereum.call.to\` | -| 0.0.2 | В объект Ethereum Transaction добавлено поле `input` | +| Версия | Примечания к релизу | +| :----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Добавлены новые функции хоста [`eth_get_balance`](#balance-of-an-address) и [`hasCode`](#check-if-an-address-a-contract-or-eoa) | +| 0.0.8 | Добавлена проверка наличия полей в схеме при сохранении объекта. | +| 0.0.7 | К типам Ethereum добавлены классы `TransactionReceipt` и `Log`
    К объекту Ethereum Event добавлено поле `receipt` | +| 0.0.6 | В объект Ethereum Transaction добавлено поле `nonce`
    В объект Ethereum Block добавлено поле `baseFeePerGas` | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | В объект Ethereum SmartContractCall добавлено поле `functionSignature` | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | В объект Ethereum Transaction добавлено поле `input` | ### Встроенные типы @@ -233,7 +233,7 @@ API `store` позволяет загружать, сохранять и уда // Импорт класса событий Transfer, сгенерированного из ERC20 ABI import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' -// Импорт типа объекта Transfer, сгенерированного из схемы GraphQL +// Импорт типа объекта Transfer, сгенерированного из схемы GraphQL import { Transfer } from '../generated/schema' событие // Обработчик события передачи @@ -269,6 +269,7 @@ if (transfer == null) { transfer = new Transfer(id) } + // Используйте объект Transfer, как и раньше ``` @@ -280,7 +281,7 @@ if (transfer == null) { Начиная с `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 и `@graphprotocol/graph-cli` v0.49.0 метод `loadInBlock` доступен для всех типов объектов. -API хранилища облегчает извлечение объектов, которые были созданы или обновлены в текущем блоке. Типичная ситуация: один обработчик создает транзакцию из какого-то события в он-чейне, а следующий обработчик хочет получить доступ к этой транзакции, если она существует. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - В случае, если транзакция не существует, субграф должен будет обратиться к базе данных просто для того, чтобы узнать, что объект не существует. Если автор субграфа уже знает, что объект должен быть создан в том же блоке, использование `loadInBlock` позволяет избежать этого обращения к базе данных. - Для некоторых субграфов эти пропущенные поиски могут существенно увеличить время индексации. @@ -292,6 +293,7 @@ if (transfer == null) { transfer = new Transfer(id) } + // Используйте объект Transfer, как и раньше ``` @@ -672,7 +674,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Смарт-контракты иногда привязывают файлы IPFS к чейну. Это позволяет мэппингам получать хэши IPFS из контракта и считывать соответствующие файлы из IPFS. Данные файла будут возвращены в виде `Bytes`, что обычно требует дальнейшей обработки, например, с помощью `json` API, описанного далее на этой странице. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
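As an aside, a more defensive variant of the same flow can be sketched with `json.try_fromBytes`, assuming that helper is available in the `graph-ts` version in use (the function and hash handling here are purely illustrative):

```typescript
import { ipfs, json, JSONValueKind } from '@graphprotocol/graph-ts'

// Illustrative helper: parse an IPFS file without aborting on malformed JSON.
export function tryReadIpfsObject(hash: string): void {
  let data = ipfs.cat(hash)
  if (data == null) {
    return
  }

  // try_fromBytes returns a Result instead of aborting the handler
  let result = json.try_fromBytes(data)
  if (result.isOk && result.value.kind == JSONValueKind.OBJECT) {
    let obj = result.value.toObject()
    // ... read the fields you need from `obj` here
  }
}
```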
При наличии хеша или пути IPFS чтение файла из IPFS выполняется следующим образом: @@ -695,8 +697,8 @@ let data = ipfs.cat(path) import { JSONValue, Value } from '@graphprotocol/graph-ts' export function processItem(value: JSONValue, userData: Value): void { - // Смотрите документацию по JsonValue для получения подробной информации о работе - // со значениями JSON +// Смотрите документацию по JsonValue для получения подробной информации о работе +// со значениями JSON let obj = value.toObject() let id = obj.get('id') let title = obj.get('title') @@ -705,7 +707,7 @@ export function processItem(value: JSONValue, userData: Value): void { return } - // Обратные вызовы также могут создавать объекты +// Обратные вызовы также могут создавать объекты let newItem = new Item(id) newItem.title = title.toString() newitem.parent = userData.toString() // Установите для родителя значение "parentId" @@ -770,44 +772,44 @@ if (value.kind == JSONValueKind.BOOL) { ### Справка по преобразованию типов -| Источник(и) | Место назначения | Функция преобразования | -| -------------------- | -------------------- | ----------------------------- | -| Address | Bytes | отсутствует | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() или s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | отсутствует | -| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() или s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | отсутствует | -| int32 | i32 | отсутствует | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | отсутствует | -| int64 - int256 | BigInt | отсутствует | -| uint32 - uint256 | BigInt | отсутствует | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toU64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Источник(и) | Место назначения | Функция преобразования | +| ---------------------- | ------------------------- | ----------------------------------- | +| Address | Bytes | отсутствует | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() или s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | отсутствует | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() или s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | отсутствует | +| int32 | i32 | отсутствует | +| int32 | 
BigInt | BigInt.fromI32(s) | +| uint24 | i32 | отсутствует | +| int64 - int256 | BigInt | отсутствует | +| uint32 - uint256 | BigInt | отсутствует | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toU64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Метаданные источника данных From 59f3ed75e609a9c017e338f87bc1321a31b83e59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:25 -0500 Subject: [PATCH 0166/1534] New translations api.mdx (Swedish) --- .../developing/creating/graph-ts/api.mdx | 168 ++++++++++-------- 1 file changed, 89 insertions(+), 79 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/sv/subgraphs/developing/creating/graph-ts/api.mdx index 78e7403e21f0..b2431a839946 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| Version | Versionsanteckningar | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| Version | Versionsanteckningar | +| :-----: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Inbyggda typer @@ -163,7 +163,7 @@ _Math_ #### TypedMap ```typescript -import { TypedMap } from '@graphprotocol/graph-ts' +import { TypedMap } from "@graphprotocol/graph-ts"; ``` `TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). @@ -179,7 +179,7 @@ The `TypedMap` class has the following API: #### Bytes ```typescript -import { Bytes } from '@graphprotocol/graph-ts' +import { Bytes } from "@graphprotocol/graph-ts"; ``` `Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. @@ -205,7 +205,7 @@ _Operators_ #### Address ```typescript -import { Address } from '@graphprotocol/graph-ts' +import { Address } from "@graphprotocol/graph-ts"; ``` `Address` extends `Bytes` to represent Ethereum `address` values. @@ -218,7 +218,7 @@ It adds the following method on top of the `Bytes` API: ### Store API ```typescript -import { store } from '@graphprotocol/graph-ts' +import { store } from "@graphprotocol/graph-ts"; ``` The `store` API allows to load, save and remove entities from and to the Graph Node store. @@ -231,24 +231,24 @@ Följande är ett vanligt mönster för att skapa entiteter från Ethereum-händ ```typescript // Importera händelseklassen Transfer som genererats från ERC20 ABI -import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' +import { Transfer as TransferEvent } from "../generated/ERC20/ERC20"; // Importera entitetstypen Transfer som genererats från GraphQL-schemat -import { Transfer } from '../generated/schema' +import { Transfer } from "../generated/schema"; // Händelsehanterare för överföring export function handleTransfer(event: TransferEvent): void { // Skapa en Transfer-entitet, med transaktionshash som enhets-ID - let id = event.transaction.hash - let transfer = new Transfer(id) + let id = event.transaction.hash; + let transfer = new Transfer(id); // Ange egenskaper för entiteten med hjälp av händelseparametrarna - transfer.from = event.params.from - transfer.to = event.params.to - transfer.amount = event.params.amount + transfer.from = event.params.from; + transfer.to = event.params.to; + transfer.amount = event.params.amount; // Spara entiteten till lagret - transfer.save() + transfer.save(); } ``` @@ -263,10 +263,10 @@ Each entity must have a unique ID to avoid collisions with other entities. It is Om en entitet redan finns kan den laddas från lagret med följande: ```typescript -let id = event.transaction.hash // eller hur ID konstrueras -let transfer = Transfer.load(id) +let id = event.transaction.hash; // eller hur ID konstrueras +let transfer = Transfer.load(id); if (transfer == null) { - transfer = new Transfer(id) + transfer = new Transfer(id); } // Använd överföringsenheten som tidigare @@ -280,16 +280,16 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. 
A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. ```typescript -let id = event.transaction.hash // eller hur ID konstrueras -let transfer = Transfer.loadInBlock(id) +let id = event.transaction.hash; // eller hur ID konstrueras +let transfer = Transfer.loadInBlock(id); if (transfer == null) { - transfer = new Transfer(id) + transfer = new Transfer(id); } // Använd överföringsenheten som tidigare @@ -343,7 +343,7 @@ transfer.amount = ... Det är också möjligt att avaktivera egenskaper med en av följande två instruktioner: ```typescript -transfer.from.unset() +transfer.from.unset(); transfer.from = null ``` @@ -353,14 +353,14 @@ Updating array properties is a little more involved, as the getting an array fro ```typescript // Detta kommer inte att fungera -entity.numbers.push(BigInt.fromI32(1)) -entity.save() +entity.numbers.push(BigInt.fromI32(1)); +entity.save(); // Detta kommer att fungera -let numbers = entity.numbers -numbers.push(BigInt.fromI32(1)) -entity.numbers = numbers -entity.save() +let numbers = entity.numbers; +numbers.push(BigInt.fromI32(1)); +entity.numbers = numbers; +entity.save(); ``` #### Ta bort entiteter från lagret @@ -398,12 +398,12 @@ type Transfer @entity { and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: ```typescript -let id = event.transaction.hash -let transfer = new Transfer(id) -transfer.from = event.params.from -transfer.to = event.params.to -transfer.amount = event.params.amount -transfer.save() +let id = event.transaction.hash; +let transfer = new Transfer(id); +transfer.from = event.params.from; +transfer.to = event.params.to; +transfer.amount = event.params.amount; +transfer.save(); ``` #### Händelser och Block/Transaktionsdata @@ -489,16 +489,19 @@ En vanlig mönster är att komma åt kontraktet från vilket en händelse härst ```typescript // Importera den genererade kontraktsklassen och den genererade klassen för överföringshändelser -import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +import { + ERC20Contract, + Transfer as TransferEvent, +} from "../generated/ERC20Contract/ERC20Contract"; // Importera den genererade entitetsklassen -import { Transfer } from '../generated/schema' +import { Transfer } from "../generated/schema"; export function handleTransfer(event: TransferEvent) { // Bind kontraktet till den adress som skickade händelsen - let contract = ERC20Contract.bind(event.address) + let contract = ERC20Contract.bind(event.address); // Åtkomst till tillståndsvariabler och funktioner genom att anropa dem - let 
erc20Symbol = contract.symbol() + let erc20Symbol = contract.symbol(); } ``` @@ -515,12 +518,12 @@ If the read-only methods of your contract may revert, then you should handle tha - For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: ```typescript -let gravitera = gravitera.bind(event.address) -let callResult = gravitera_gravatarToOwner(gravatar) +let gravitera = gravitera.bind(event.address); +let callResult = gravitera_gravatarToOwner(gravatar); if (callResult.reverted) { - log.info('getGravatar reverted', []) + log.info("getGravatar reverted", []); } else { - let owner = callResult.value + let owner = callResult.value; } ``` @@ -579,7 +582,7 @@ let isContract = ethereum.hasCode(eoa).inner // returns false ### API för loggning ```typescript -import { log } from '@graphprotocol/graph-ts' +import { log } from "@graphprotocol/graph-ts"; ``` The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. @@ -595,7 +598,11 @@ The `log` API includes the following functions: The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. ```typescript -log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +log.info("Message to be displayed: {}, {}, {}", [ + value.toString(), + anotherValue.toString(), + "already a string", +]); ``` #### Loggning av ett eller flera värden @@ -618,11 +625,11 @@ export function handleSomeEvent(event: SomeEvent): void { I exemplet nedan loggas endast det första värdet i argument arrayen, trots att arrayen innehåller tre värden. ```typescript -let myArray = ['A', 'B', 'C'] +let myArray = ["A", "B", "C"]; export function handleSomeEvent(event: SomeEvent): void { // Visar : "Mitt värde är: A" (Även om tre värden skickas till `log.info`) - log.info('Mitt värde är: {}', myArray) + log.info("Mitt värde är: {}", myArray); } ``` @@ -631,11 +638,14 @@ export function handleSomeEvent(event: SomeEvent): void { Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
```typescript -let myArray = ['A', 'B', 'C'] +let myArray = ["A", "B", "C"]; export function handleSomeEvent(event: SomeEvent): void { // Visar: "Mitt första värde är: A, andra värdet är: B, tredje värdet är: C" - log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) + log.info( + "My first value is: {}, second value is: {}, third value is: {}", + myArray + ); } ``` @@ -646,7 +656,7 @@ För att visa ett specifikt värde i arrayen måste det indexeras och tillhandah ```typescript export function handleSomeEvent(event: SomeEvent): void { // Visar : "Mitt tredje värde är C" - log.info('My third value is: {}', [myArray[2]]) + log.info("My third value is: {}", [myArray[2]]); } ``` @@ -655,36 +665,36 @@ export function handleSomeEvent(event: SomeEvent): void { I exemplet nedan loggas blocknummer, blockhash och transaktionshash från en händelse: ```typescript -import { log } from '@graphprotocol/graph-ts' +import { log } from "@graphprotocol/graph-ts"; export function handleSomeEvent(event: SomeEvent): void { - log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + log.debug("Block number: {}, block hash: {}, transaction hash: {}", [ event.block.number.toString(), // "47596000" event.block.hash.toHexString(), // "0x..." event.transaction.hash.toHexString(), // "0x..." - ]) + ]); } ``` ### IPFS API ```typescript -import { ipfs } from '@graphprotocol/graph-ts' +import { ipfs } from "@graphprotocol/graph-ts" ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. För att läsa en fil från IPFS med en given IPFS-hash eller sökväg görs följande: ```typescript // Placera detta i en händelsehanterare i mappningen -let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' -let data = ipfs.cat(hash) +let hash = "QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D"; +let data = ipfs.cat(hash); // Sökvägar som `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` // som inkluderar filer i kataloger stöds också -let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' -let data = ipfs.cat(path) +let path = "QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile"; +let data = ipfs.cat(path); ``` **Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. @@ -692,31 +702,31 @@ let data = ipfs.cat(path) It is also possible to process larger files in a streaming fashion with `ipfs.map`. 
The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:

```typescript
-import { JSONValue, Value } from '@graphprotocol/graph-ts'
+import { JSONValue, Value } from "@graphprotocol/graph-ts";

 export function processItem(value: JSONValue, userData: Value): void {
   // Se JSONValue-dokumentationen för mer information om hur man hanterar
   // med JSON-värden
-  let obj = value.toObject()
-  let id = obj.get('id')
-  let title = obj.get('title')
+  let obj = value.toObject();
+  let id = obj.get("id");
+  let title = obj.get("title");

   if (!id || !title) {
-    return
+    return;
   }

   // Callbacks kan också skapa enheter
-  let newItem = new Item(id)
-  newItem.title = title.toString()
-  newitem.parent = userData.toString() // Ange parent till "parentId"
-  newitem.save()
+  let newItem = new Item(id);
+  newItem.title = title.toString();
+  newItem.parent = userData.toString(); // Ange parent till "parentId"
+  newItem.save();
 }

 // Placera detta i en händelsehanterare i mappningen
-ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+ipfs.map("Qm...", "processItem", Value.fromString("parentId"), ["json"]);

 // Alternativt kan du använda `ipfs.mapJSON`.
-ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+ipfs.mapJSON("Qm...", "processItem", Value.fromString("parentId"));
```

The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited.

@@ -726,7 +736,7 @@ On success, `ipfs.map` returns `void`. If any invocation of the callback causes

 ### Crypto API

 ```typescript
-import { crypto } from '@graphprotocol/graph-ts'
+import { crypto } from "@graphprotocol/graph-ts";
 ```

The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one:

@@ -736,7 +746,7 @@ The `crypto` API makes cryptographic functions available for use in mappings.

 ### JSON API

 ```typescript
-import { json, JSONValueKind } from '@graphprotocol/graph-ts'
+import { json, JSONValueKind } from "@graphprotocol/graph-ts"
 ```

JSON data can be parsed using the `json` API:

From 1166099497c97cd30f64106f45aefb23aebf48b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:46:27 -0500
Subject: [PATCH 0167/1534] New translations api.mdx (Turkish)

---
 .../developing/creating/graph-ts/api.mdx      | 108 +++++++++---------
 1 file changed, 54 insertions(+), 54 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/tr/subgraphs/developing/creating/graph-ts/api.mdx
index 2e9ea9e06d56..bda6a0f67c61 100644
--- a/website/src/pages/tr/subgraphs/developing/creating/graph-ts/api.mdx
+++ b/website/src/pages/tr/subgraphs/developing/creating/graph-ts/api.mdx
@@ -2,7 +2,7 @@
 title: AssemblyScript API'si
 ---

-> Not: Eğer `graph-cli`/`graph-ts` sürüm `0.22.0`'den önce bir subgraph oluşturduysanız, eski bir AssemblyScript sürümünü kullanıyorsunuz demektir. [`Geçiş Kılavuzu`](/resources/release-notes/assemblyscript-migration-guide/) gözden geçirmeniz önerilir.
+> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/).

 Subgraph eşlemeleri yazarken kullanılabilecek yerleşik API'leri öğrenin. Hazır olarak sunulan iki tür API mevcuttur:

@@ -29,16 +29,16 @@ Dil eşlemeleri AssemblyScript ile yazıldığından, [AssemblyScript wiki'sinde

 Subgraph manifestosundaki `apiVersion`, bir subgraph için Graph Düğümü tarafından çalıştırılan eşleme (mapping) API'sinin sürümünü belirtir.

-| Sürüm | Sürüm Notları |
-| :-: | --- |
-| 0.0.9 | Yeni host fonksiyonları ekler: [`eth_get_balance`](#balance-of-an-address) ve [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
-| 0.0.8 | Bir varlığı kaydederken şemadaki alanların varlığını doğrulama mekanizması ekler. |
-| 0.0.7 | Ethereum türlerine `TransactionReceipt` ve `Log` sınıfları eklendi<br />Ethereum Event nesnesine `receipt` alanı eklendi |
-| 0.0.6 | Ethereum Transaction nesnesine `nonce` alanı eklendi<br />Ethereum Block nesnesine `baseFeePerGas` eklendi |
-| 0.0.5 | AssemblyScript sürümü 0.19.10'a yükseltildi (bu sürümle birlikte uyumluluğu bozabilecek değişiklikler yapılmıştır, lütfen [`Geçiş Kılavuzu`](/resources/release-notes/assemblyscript-migration-guide/) bölümüne bakın)<br />`ethereum.transaction.gasUsed`, `ethereum.transaction.gasLimit` olarak yeniden adlandırıldı |
-| 0.0.4 | Ethereum SmartContractCall nesnesine `functionSignature` alanı eklendi |
-| 0.0.3 | Ethereum Call nesnesine `from` alanı eklendi<br />`etherem.call.address`, `ethereum.call.to` olarak yeniden adlandırıldı |
-| 0.0.2 | Ethereum Transaction nesnesine `input` alanı eklendi |
+| Sürüm | Sürüm Notları |
+| :---: | --- |
+| 0.0.9 | Yeni host fonksiyonları ekler: [`eth_get_balance`](#balance-of-an-address) ve [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
+| 0.0.8 | Bir varlığı kaydederken şemadaki alanların varlığını doğrulama mekanizması ekler. |
+| 0.0.7 | Ethereum türlerine `TransactionReceipt` ve `Log` sınıfları eklendi<br />Ethereum Event nesnesine `receipt` alanı eklendi |
+| 0.0.6 | Ethereum Transaction nesnesine `nonce` alanı eklendi<br />Ethereum Block nesnesine `baseFeePerGas` eklendi |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Ethereum SmartContractCall nesnesine `functionSignature` alanı eklendi |
+| 0.0.3 | Added `from` field to the Ethereum Call object<br />
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Ethereum Transaction nesnesine `input` alanı eklendi | ### Dahili Türler @@ -241,15 +241,15 @@ export function handleTransfer(event: TransferEvent): void { // İşlem hash'ını olay kimliği olarak kullanarak bir Transfer varlığı oluşturun let id = event.transaction.hash let transfer = new Transfer(id) - + // Olay parametrelerini kullanarak varlığın özelliklerini ayarlayın transfer.from = event.params.from transfer.to = event.params.to transfer.amount = event.params.amount - + // Varlığı depoya kaydedin transfer.save() -} + } ``` Zincir işlenirken bir `Transfer` olayıyla karşılaşıldığında, oluşturulan `Transfer` türü (burada varlık türüyle adlandırma çakışmasını önlemek için `TransferEvent` olarak adlandırılmıştır) kullanılarak `handleTransfer` olay işleyicisine aktarılır. Bu tür, olayın ana işlemi ve parametreleri gibi verilere erişim sağlar. @@ -280,7 +280,7 @@ Varlık henüz depoda mevcut olmayabileceğinden, `load` yöntemi `Transfer | nu `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 ve `@graphprotocol/graph-cli` v0.49.0 itibarıyla `loadInBlock` metodu tüm varlık türlerinde kullanılabilir hale gelmiştir. -Store API'si mevcut blokta oluşturulan veya güncellenen varlıkların alınmasına olanak tanır. Bunun tipik bir örneği, bir işleyicinin zincir-üzeri bir olaydan bir işlem oluşturması ve sonraki bir işleyicinin, bu işlem mevcutsa ona erişmek istemesidir. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - Eğer işlem mevcut değilse subgraph sırf varlığın mevcut olmadığını öğrenmek için veritabanına başvurmak zorunda kalacaktır. Ancak, subgraph yazarı varlığın aynı blokta oluşturulmuş olması gerektiğini zaten biliyorsa, `loadInBlock` kullanmak bu veritabanı sorgusunu ortadan kaldırır. - Bazı subgraph'lerde bu başarısız aramalar endeksleme süresine önemli ölçüde etki edebilir. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Akıllı sözleşmeler bazen IPFS dosyalarını zincire sabitler. Bu, eşlemelerin sözleşmeden IPFS hash'lerini almasını ve ilgili dosyaları IPFS'ten okumasını sağlar. Dosya verileri `Bytes` olarak döndürülür ve genellikle daha fazla işleme tabi tutulması gerekir; örneğin, bu sayfada daha sonra belgelenen `json` API'si ile. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
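A quick illustration of the paragraph above, since the removed and added lines describe the same flow: `ipfs.cat` returns the raw file contents as `Bytes` (or null), and decoding is left to the mapping. This is only a sketch; the handler name and the CID literal are placeholders, not part of this patch.

```typescript
import { ipfs, json } from "@graphprotocol/graph-ts";

export function readPinnedFile(): void {
  // Hypothetical CID; in practice the hash would come from a contract event.
  let data = ipfs.cat("QmHashPlaceholder");
  if (data != null) {
    // The Bytes usually need further processing, e.g. decoding as JSON:
    let value = json.fromBytes(data);
    // ...use value.toObject() / value.toArray() as appropriate.
  }
}
```

`json.fromBytes` aborts on malformed input, so the fallible `json.try_fromBytes` variant is the safer choice for untrusted file contents.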
IPFS hash'ı veya yolu verildiğinde, bir dosyayı IPFS'den okuma şu şekilde yapılır: @@ -771,44 +771,44 @@ Bir değerin türü kesin olduğunda, aşağıdaki yöntemlerden biri kullanıla ### Tip Dönüşümleri Referansı -| Kaynak(lar) | Hedef | Dönüşüm fonksiyonu | -| ----------------- | ----------------- | ---------------------------- | -| Address | Bytes | yok | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | Dizgi (onaltılık) | s.toHexString() or s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | yok | -| Bytes (işaretli) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (işaretsiz) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | Dizgi (onaltılık) | s.toHexString() or s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | yok | -| int32 | i32 | yok | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | yok | -| int64 - int256 | BigInt | yok | -| uint32 - uint256 | BigInt | yok | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| Dizgi (onaltılık) | Bytes | ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | +| Kaynak(lar) | Hedef | Dönüşüm fonksiyonu | +| ---------------------- | -------------------- | ---------------------------- | +| Address | Bytes | yok | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | Dizgi (onaltılık) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | yok | +| Bytes (işaretli) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (işaretsiz) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | Dizgi (onaltılık) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | yok | +| int32 | i32 | yok | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | yok | +| int64 - int256 | BigInt | yok | +| uint32 - uint256 | BigInt | yok | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| Dizgi (onaltılık) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | ### Veri Kaynağı Meta Verileri From 8d6f61b47d6fa2dca10d6c57a2dae7b95706f8ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:28 -0500 Subject: [PATCH 0168/1534] New translations api.mdx (Ukrainian) --- 
 .../developing/creating/graph-ts/api.mdx      | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/website/src/pages/uk/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/uk/subgraphs/developing/creating/graph-ts/api.mdx
index 4c5ce57993f2..5cd8ab120b25 100644
--- a/website/src/pages/uk/subgraphs/developing/creating/graph-ts/api.mdx
+++ b/website/src/pages/uk/subgraphs/developing/creating/graph-ts/api.mdx
@@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs:

 The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph.

-| Version | Release notes |
-| :-: | --- |
-| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
-| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. |
-| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types<br />Added `receipt` field to the Ethereum Event object |
-| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
-| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
-| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
-| 0.0.3 | Added `from` field to the Ethereum Call object<br />`etherem.call.address` renamed to `ethereum.call.to` |
-| 0.0.2 | Added `input` field to the Ethereum Transaction object |
+| Version | Release notes |
+| :-----: | --- |
+| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
+| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. |
+| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types<br />Added `receipt` field to the Ethereum Event object |
+| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
+| 0.0.3 | Added `from` field to the Ethereum Call object<br />
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Built-in Types @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Given an IPFS hash or path, reading a file from IPFS is done as follows: From 3a0e2b9f760cf0ffb72a88f43ce24bb7d7cf80cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:29 -0500 Subject: [PATCH 0169/1534] New translations api.mdx (Chinese Simplified) --- .../developing/creating/graph-ts/api.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/zh/subgraphs/developing/creating/graph-ts/api.mdx index 708704b73b07..3f5accd21882 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ Since language mappings are written in AssemblyScript, it is useful to review th 子图清单中的 `apiVersion` 指定了由 Graph Node 运行的特定子图的映射 API 版本。 -| 版本 | Release 说明 | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | 添加了 `TransactionReceipt` 和 `Log` 类到以太坊类型。
    已将 `receipt` 字段添加到Ethereum Event对象。 |
-| 0.0.6 | 向Ethereum Transaction对象添加了 nonce 字段 向 Etherum Block对象添加<br />baseFeePerGas字段 |
-| 0.0.5 | AssemblyScript 升级到版本 0.19.10(这包括重大更改,参阅<br />迁移指南)ethereum.transaction.gasUsed 重命名为 ethereum.transaction.gasLimit |
-| 0.0.4 | 已向 Ethereum SmartContractCall对象添加了 `functionSignature` 字段。 |
-| 0.0.3 | 已向Ethereum Call 对象添加了 `from` 字段。<br />`etherem.call.address` 被重命名为 `ethereum.call.to`。 |
-| 0.0.2 | 已向Ethereum Transaction对象添加了 `input` 字段。 |
+| 版本 | Release 说明 |
+| :---: | --- |
+| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
+| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. |
+| 0.0.7 | 添加了 `TransactionReceipt` 和 `Log` 类到以太坊类型。<br />已将 `receipt` 字段添加到Ethereum Event对象。 |
+| 0.0.6 | 向Ethereum Transaction对象添加了 nonce 字段 向 Etherum Block对象添加<br />baseFeePerGas字段 |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | 已向 Ethereum SmartContractCall对象添加了 `functionSignature` 字段。 |
+| 0.0.3 | Added `from` field to the Ethereum Call object<br />
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | 已向Ethereum Transaction对象添加了 `input` 字段。 | ### 内置类型 @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value 截至 `graph-node` v0.31.0、`@graphprotocol/graph-ts` v0.30.0 和 `@graphprotocol/graph-cli` v0.49.0,所有实体类型上都提供了 `loadInBlock` 方法。 -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -671,7 +671,7 @@ export function handleSomeEvent(event: SomeEvent): void { 从 '@graphprotocol/graph-ts'导入{ ipfs } ``` -智能合约偶尔会在链上固定 IPFS 文件。这允许映射从合约获取 IPFS 哈希并从 IPFS 读取相应的文件。文件数据将作为 `Bytes` 返回,通常需要进一步处理,例如使用稍后在本页面中记录的 `json` API。 +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
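For reviewers, a sketch of the `loadInBlock` shortcut documented in the hunk above. The `Transfer` entity, its `Bytes` id, and the import path mirror the generated-entity examples elsewhere on this page and are assumptions here, not part of the patch.

```typescript
import { Bytes } from "@graphprotocol/graph-ts";
import { Transfer } from "../generated/schema"; // assumed codegen output path

export function findSameBlockTransfer(id: Bytes): void {
  // Checks only entities written earlier in the current block, so a miss
  // costs no database roundtrip.
  let transfer = Transfer.loadInBlock(id);
  if (transfer == null) {
    // Not created or updated in this block; fall back to Transfer.load(id)
    // only if the entity could plausibly come from an earlier block.
  }
}
```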
给定一个 IPFS hash或路径,从 IPFS 读取文件的过程如下: @@ -769,19 +769,19 @@ if (value.kind == JSONValueKind.BOOL) { ### 类型转换参考 -| 源类型 | 目标类型 | 转换函数 | +| 源类型 | 目标类型 | 转换函数 | | -------------------- | -------------------- | ---------------------------- | | Address | Bytes | none | | Address | String | s.toHexString() | | BigDecimal | String | s.toString() | | BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() 或 s.toHex() | +| BigInt | String (hexadecimal) | s.toHexString() 或 s.toHex() | | BigInt | String (unicode) | s.toString() | | BigInt | i32 | s.toI32() | | Boolean | Boolean | none | | Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | | Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() 或 s.toHex() | +| Bytes | String (hexadecimal) | s.toHexString() 或 s.toHex() | | Bytes | String (unicode) | s.toString() | | Bytes | String (base58) | s.toBase58() | | Bytes | i32 | s.toI32() | From 807ecad34a33ebc82bf45d60456bd738b8e606ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:31 -0500 Subject: [PATCH 0170/1534] New translations api.mdx (Urdu (Pakistan)) --- .../developing/creating/graph-ts/api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/ur/subgraphs/developing/creating/graph-ts/api.mdx index 89c920dcb52a..f4e52ec28767 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| ورزن | جاری کردہ نوٹس | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| ورزن | جاری کردہ نوٹس | +| :---: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | +| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | | 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### بلٹ ان اقسام @@ -225,7 +225,7 @@ The `store` API allows to load, save and remove entities from and to the Graph N Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. -#### ہستیوں کی تخلیق +#### Creating entities ایتھیریم ایونٹس سے ہستیوں کو بنانے کے لیے درج ذیل ایک عام نمونہ ہے. @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
IPFS ہیش یا پاتھ کو دیکھتے ہوئے، IPFS سے فائل کو پڑھنا اس طرح کیا جاتا ہے: From 05fd7491214be3a0cabead62665fa51fc13865eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:32 -0500 Subject: [PATCH 0171/1534] New translations api.mdx (Vietnamese) --- .../developing/creating/graph-ts/api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/vi/subgraphs/developing/creating/graph-ts/api.mdx index 19b154a479c1..731770a79c96 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| Phiên bản | Ghi chú phát hành | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object |
-| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
-| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
-| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
-| 0.0.3 | Added `from` field to the Ethereum Call object<br />`etherem.call.address` renamed to `ethereum.call.to` |
-| 0.0.2 | Added `input` field to the Ethereum Transaction object |
+| Phiên bản | Ghi chú phát hành |
+| :-------: | --- |
+| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
+| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. |
+| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types<br />Added `receipt` field to the Ethereum Event object |
+| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
+| 0.0.3 | Added `from` field to the Ethereum Call object<br />
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### Các loại cài sẵn @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. Given an IPFS hash or path, reading a file from IPFS is done as follows: From 9d2c7e33801d384ecf0ecf42acdf21febb05e924 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:33 -0500 Subject: [PATCH 0172/1534] New translations api.mdx (Marathi) --- .../developing/creating/graph-ts/api.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/mr/subgraphs/developing/creating/graph-ts/api.mdx index 75baf7701a15..97cca409a0eb 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/graph-ts/api.mdx @@ -29,16 +29,16 @@ The `@graphprotocol/graph-ts` library provides the following APIs: The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| आवृत्ती | रिलीझ नोट्स | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object |
-| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
-| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
-| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
-| 0.0.3 | Added `from` field to the Ethereum Call object<br />`etherem.call.address` renamed to `ethereum.call.to` |
-| 0.0.2 | Added `input` field to the Ethereum Transaction object |
+| आवृत्ती | रिलीझ नोट्स |
+| :-----: | --- |
+| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
+| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. |
+| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types<br />Added `receipt` field to the Ethereum Event object |
+| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
+| 0.0.3 | Added `from` field to the Ethereum Call object<br />
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | ### अंगभूत प्रकार @@ -280,7 +280,7 @@ As the entity may not exist in the store yet, the `load` method returns a value As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. - For some subgraphs, these missed lookups can contribute significantly to the indexing time. @@ -376,7 +376,7 @@ store.remove('Transfer', id) ### इथरियम API -Ethereum API स्मार्ट कॉन्ट्रॅक्ट्स, पब्लिक स्टेट व्हेरिएबल्स, कॉन्ट्रॅक्ट फंक्शन्स, इव्हेंट्स, व्यवहार, ब्लॉक्स आणि एन्कोडिंग/डिकोडिंग इथरियम डेटामध्ये प्रवेश प्रदान करते. +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. #### इथरियम प्रकारांसाठी समर्थन @@ -672,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { '@graphprotocol/graph-ts' वरून { ipfs } आयात करा ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
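The Ethereum API summary in the hunk above is abstract, so here is a minimal sketch of the usual contract-call pattern. The `ERC20` binding and its import path are assumptions standing in for whatever `graph codegen` emits for a real manifest.

```typescript
import { Address } from "@graphprotocol/graph-ts";
import { ERC20 } from "../generated/Token/ERC20"; // assumed generated binding

export function readSymbol(tokenAddress: Address): string {
  let token = ERC20.bind(tokenAddress);
  // try_ variants return a CallResult instead of aborting when the call reverts
  let result = token.try_symbol();
  return result.reverted ? "unknown" : result.value;
}
```

On a revert, `result.reverted` is true and `result.value` must not be read.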
आयपीएफएस हॅश किंवा पथ दिल्यास, आयपीएफएस मधून फाइल वाचणे खालीलप्रमाणे केले जाते: From b05cbfc87c3b9c0255ce1d4134abdec9e0573ef7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:35 -0500 Subject: [PATCH 0173/1534] New translations api.mdx (Hindi) --- .../developing/creating/graph-ts/api.mdx | 42 +++++++++---------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/graph-ts/api.mdx b/website/src/pages/hi/subgraphs/developing/creating/graph-ts/api.mdx index fd52cda96906..3c388a628d5b 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/graph-ts/api.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/graph-ts/api.mdx @@ -1,8 +1,8 @@ --- -title: असेंबलीस्क्रिप्ट एपीआई +title: AssemblyScript API --- -> ध्यान दें: यदि आपने `graph-cli`/`graph-ts` version `0.22.0` से पहले एक subgragh बनाया है, तो आप AssemblyScript के पुराना संस्करण का उपयोग कर रहे हैं, हम अनुशंसा करते हैं कि [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/). +> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/). यह पृष्ठ दस्तावेज करता है कि Subgraph मैपिंग लिखते समय किन अंतर्निहित एपीआई का उपयोग किया जा सकता है। बॉक्स से बाहर दो प्रकार के एपीआई उपलब्ध हैं: @@ -13,7 +13,7 @@ title: असेंबलीस्क्रिप्ट एपीआई चूंकि भाषा मैपिंग्स AssemblyScript में लिखी गई हैं, इसलिए [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) से भाषा और मानक पुस्तकालय की सुविधाओं की समीक्षा करना उपयोगी है। -## एपीआई संदर्भ +## API Reference The `@graphprotocol/graph-ts` library provides the following APIs: @@ -25,22 +25,22 @@ The `@graphprotocol/graph-ts` library provides the following APIs: - A `crypto` API to use cryptographic functions. - एथेरियम, JSON, ग्राफक्यूएल और असेंबलीस्क्रिप्ट जैसे विभिन्न प्रकार की प्रणालियों के बीच अनुवाद करने के लिए निम्न-स्तरीय आदिम। -### संस्करणों +### Versions The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. -| संस्करण | रिलीज नोट्स | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object |
-| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
-| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
-| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
-| 0.0.3 | Added `from` field to the Ethereum Call object<br />`etherem.call.address` renamed to `ethereum.call.to` |
-| 0.0.2 | Added `input` field to the Ethereum Transaction object |
+| Version | Release notes |
+| :-----: | --- |
+| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) |
+| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. |
+| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types<br />Added `receipt` field to the Ethereum Event object |
+| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/resources/release-notes/assemblyscript-migration-guide/))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
+| 0.0.3 | Added `from` field to the Ethereum Call object<br />
    `ethereum.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | -### अंतर्निहित प्रकार +### Built-in Types Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://www.assemblyscript.org/types.html). @@ -280,7 +280,7 @@ if (transfer == null) { As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. -स्टोर एपीआई वर्तमान ब्लॉक में बनाए गए या अपडेट किए गए Entities को पुनः प्राप्त करने की सुविधा प्रदान करता है। इस स्थिति में एक हैंडलर किसी ऑन-चेन घटना से एक लेन-देन बनाता है, और एक बाद वाला हैंडलर चाहता है कि यदि यह लेन-देन मौजूद है, तो वह इसे एक्सेस कर सके। +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some onchain event, and a later handler wants to access this transaction if it exists. - यदि लेन-देन मौजूद नहीं है, तो subgraph को केवल यह पता लगाने के लिए डेटाबेस में जाना होगा कि Entity मौजूद नहीं है। यदि subgraph लेखक पहले से जानता है कि Entity उसी ब्लॉक में बनाई जानी चाहिए थी, तो `loadInBlock` का उपयोग इस डेटाबेस राउंडट्रिप से बचाता है। - कुछ subgraphs के लिए, ये छूटे हुए लुकअप्स indexing समय में महत्वपूर्ण योगदान दे सकते हैं। @@ -595,11 +595,7 @@ The `log` API includes the following functions: The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. ```typescript -log.info('संदेश प्रदर्शित किया जाना है: {}, {}, {}', [ - value.toString(), - OtherValue.toString(), - 'पहले से ही एक स्ट्रिंग', -]) +log.info ('संदेश प्रदर्शित किया जाना है: {}, {}, {}', [value.toString (), OtherValue.toString (), 'पहले से ही एक स्ट्रिंग']) ``` #### एक या अधिक मान लॉग करना @@ -676,7 +672,7 @@ export function handleSomeEvent(event: SomeEvent): void { import { ipfs } from '@graphprotocol/graph-ts' ``` -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. +Smart contracts occasionally anchor IPFS files onchain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. 
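The `dataSource` paragraph touched in the hunk above reads more concretely with a usage sketch; the function name here is illustrative only.

```typescript
import { dataSource, log } from "@graphprotocol/graph-ts";

export function logInvokingSource(): void {
  // Address and network of the data source that triggered this handler
  let contract = dataSource.address().toHexString();
  let network = dataSource.network();
  log.info("Invoked by {} on {}", [contract, network]);
}
```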
IPFS हैश या पथ को देखते हुए, IPFS से फ़ाइल पढ़ना निम्नानुसार किया जाता है: @@ -813,7 +809,7 @@ When the type of a value is certain, it can be converted to a [built-in type](#b | String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | | String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | -### डेटा स्रोत मेटाडेटा +### Data Source Metadata You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: From 995d90547c3dd1961c2cdb729468ca0645fd3a23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:36 -0500 Subject: [PATCH 0174/1534] New translations common-issues.mdx (Romanian) --- .../ro/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ro/subgraphs/developing/creating/graph-ts/common-issues.mdx index 5b99efa8f493..f8d0c9c004c2 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Common AssemblyScript Issues There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 695adb9c9e191b22838731153d46714b026c382b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:37 -0500 Subject: [PATCH 0175/1534] New translations common-issues.mdx (French) --- .../fr/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/fr/subgraphs/developing/creating/graph-ts/common-issues.mdx index b50b0404002a..a946b30a71b1 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Common AssemblyScript Issues Il existe certains problèmes courants avec [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) lors du développement de subgraph. Ces problèmes varient en termes de difficulté de débogage, mais les connaître peut être utile. Voici une liste non exhaustive de ces problèmes : -- Les variables de classe `Private` ne sont pas appliquées dans [AssembyScript] (https://www.assemblyscript.org/status.html#language-features). 
Il n'y a aucun moyen de protéger les variables de classe d'une modification directe à partir de l'objet de la classe. +- Les variables de classe `Private` ne sont pas appliquées dans [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). Il n'y a aucun moyen de protéger les variables de classe d'une modification directe à partir de l'objet de la classe. - La portée n'est pas héritée dans les [fonctions de fermeture] (https://www.assemblyscript.org/status.html#on-closures), c'est-à-dire que les variables déclarées en dehors des fonctions de fermeture ne peuvent pas être utilisées. Explication dans les [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From feafe26a95824ab6393e8d0516f9c71dad09f9fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:38 -0500 Subject: [PATCH 0176/1534] New translations common-issues.mdx (Spanish) --- .../es/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/es/subgraphs/developing/creating/graph-ts/common-issues.mdx index 99563f803d43..9b540b6d07d4 100644 --- a/website/src/pages/es/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Problemas comunes de AssemblyScript There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From df0bf7305107e1954f34399cb1ac828c348a90e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:39 -0500 Subject: [PATCH 0177/1534] New translations common-issues.mdx (Arabic) --- .../ar/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ar/subgraphs/developing/creating/graph-ts/common-issues.mdx index e71839295d94..6c50af984ad0 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: مشاكل شائعة في أسمبلي سكريبت (AssemblyScript) هناك بعض مشاكل [أسمبلي سكريبت](https://github.com/AssemblyScript/assemblyscript) المحددة، التي من الشائع الوقوع فيها أثتاء تطوير غرافٍ فرعي. وهي تتراوح في صعوبة تصحيح الأخطاء، ومع ذلك، فإنّ إدراكها قد يساعد. 
وفيما يلي قائمة غير شاملة لهذه المشاكل: -- متغيرات الفئات الخاصة (Private) في [AssembyScript] (https://www.assemblyscript.org/status.html#language-features) غير مفروضة بشكل كامل. ليس هناك طريقة لحماية متغيرات الفئات من التعديل المباشر من كائن الفئة. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - لا يتم توريث النطاق في [دوال الإغلاق](https://www.assemblyscript.org/status.html#on-closures)، أي لا يمكن استخدام المتغيرات المعلنة خارج دوال الإغلاق. الشرح في [ النقاط الهامة للمطورين #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 40c98686730462944ab921e69c6da77ed08a7011 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:40 -0500 Subject: [PATCH 0178/1534] New translations common-issues.mdx (Czech) --- .../cs/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/cs/subgraphs/developing/creating/graph-ts/common-issues.mdx index 8bc8119212bd..79ec3df1a827 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Běžné problémy se AssemblyScript Při vývoji podgrafů se často vyskytují určité problémy [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Jejich obtížnost při ladění je různá, nicméně jejich znalost může pomoci. Následuje neúplný seznam těchto problémů: -- Proměnné třídy `Private` nejsou v [AssembyScript](https://www.assemblyscript.org/status.html#language-features) vynucovány. Neexistuje žádný způsob, jak chránit proměnné třídy před přímou změnou z objektu třídy. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Rozsah se nedědí do [uzavíracích funkcí](https://www.assemblyscript.org/status.html#on-closures), tj. proměnné deklarované mimo uzavírací funkce nelze použít. Vysvětlení v [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From b2cdd886e9e9d49920631c4356ac9b0b840d25a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:41 -0500 Subject: [PATCH 0179/1534] New translations common-issues.mdx (German) --- .../de/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/de/subgraphs/developing/creating/graph-ts/common-issues.mdx index 5b99efa8f493..f8d0c9c004c2 100644 --- a/website/src/pages/de/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Common AssemblyScript Issues There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. 
The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 81dee60a1a875afded5083de9cb002f9b4cb7d69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:42 -0500 Subject: [PATCH 0180/1534] New translations common-issues.mdx (Italian) --- .../it/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/it/subgraphs/developing/creating/graph-ts/common-issues.mdx index 541344663535..8d714dad8499 100644 --- a/website/src/pages/it/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Problemi comuni di AssemblyScript Ci sono alcuni problemi [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) in cui è comune imbattersi durante lo sviluppo di subgraph. La loro difficoltà di debug è variabile, ma conoscerli può essere d'aiuto. Quello che segue è un elenco non esaustivo di questi problemi: -- Le variabili di classe `Private` non sono applicate in [AssembyScript] (https://www.assemblyscript.org/status.html#language-features). Non c'è modo di proteggere le variabili di classe dalla modifica diretta dell'oggetto classe. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - L'ambito non è ereditato nelle [closure functions](https://www.assemblyscript.org/status.html#on-closures), cioè le variabili dichiarate al di fuori delle closure functions non possono essere utilizzate. Spiegazione in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). 
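Every commit in this stretch translates the same two pitfalls, so one concrete sketch of the first may help reviewers. Assuming stock AssemblyScript (nothing project-specific), `private` parses but is not enforced:

```typescript
class Counter {
  private count: i32 = 0;

  value(): i32 {
    return this.count;
  }
}

export function demo(): i32 {
  let c = new Counter();
  // Compiles and runs in AssemblyScript despite `private`, which is exactly
  // the missing enforcement the bullet describes.
  c.count = 42;
  return c.value();
}
```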
From 0980b6125ab76e06e32386c5be5f02561bd2f1c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:43 -0500 Subject: [PATCH 0181/1534] New translations common-issues.mdx (Japanese) --- .../ja/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ja/subgraphs/developing/creating/graph-ts/common-issues.mdx index b9f329351035..9bb0634b57b3 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: AssemblyScriptのよくある問題 AssemblyScript](https://github.com/AssemblyScript/assemblyscript)には、サブグラフの開発中によく遭遇する問題があります。これらの問題は、デバッグの難易度に幅がありますが、認識しておくと役に立つかもしれません。以下は、これらの問題の非網羅的なリストです: -- AssembyScript](https://www.assemblyscript.org/status.html#language-features) では `Private` クラス変数は強制されません。クラス変数がクラスオブジェクトから直接変更されないようにする方法はありません。 +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - スコープは[クロージャー関数](https://www.assemblyscript.org/status.html#on-closures)には継承されません。つまり、クロージャー関数の外で宣言された変数は使用できません。Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s)に説明があります。 From b17bd0b2dbb03eb27216947131c7959a5ce714b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:44 -0500 Subject: [PATCH 0182/1534] New translations common-issues.mdx (Korean) --- .../ko/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ko/subgraphs/developing/creating/graph-ts/common-issues.mdx index 5b99efa8f493..f8d0c9c004c2 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Common AssemblyScript Issues There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). 
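The second pitfall, closures not inheriting scope, is likewise clearer in code. Again a sketch against stock AssemblyScript, with the failing line kept commented out:

```typescript
export function sum(values: i32[]): i32 {
  let total = 0;
  // This would NOT compile: the arrow function cannot capture `total`
  // from the enclosing scope in AssemblyScript.
  // values.forEach((v) => { total += v });

  // A plain loop needs no capture and works:
  for (let i = 0; i < values.length; i++) {
    total += values[i];
  }
  return total;
}
```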
From e8e01f731af44161b9db9f8221280ca287a71ebe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:45 -0500 Subject: [PATCH 0183/1534] New translations common-issues.mdx (Dutch) --- .../nl/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/nl/subgraphs/developing/creating/graph-ts/common-issues.mdx index 5b99efa8f493..f8d0c9c004c2 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Common AssemblyScript Issues There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 42e69e0984625d6295f1c4d79836fcb170fa761e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:46 -0500 Subject: [PATCH 0184/1534] New translations common-issues.mdx (Polish) --- .../pl/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/pl/subgraphs/developing/creating/graph-ts/common-issues.mdx index 5b99efa8f493..f8d0c9c004c2 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Common AssemblyScript Issues There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). 
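The second issue quoted in these patches is the closure limitation: AssemblyScript closures do not inherit the enclosing scope, so locals declared outside a callback cannot be used inside it. A small sketch under that assumption (the `total` helper is hypothetical, not from the patched files); the commented-out line shows the pattern that fails to compile, while a plain loop avoids the capture entirely:

```typescript
// AssemblyScript sketch of the closure limitation described above.
export function total(values: i32[]): i32 {
  let sum = 0
  // Fails to compile: `sum` is declared outside the closure and cannot be used inside it.
  // values.forEach((n: i32) => { sum += n })
  for (let i = 0; i < values.length; i++) {
    sum += values[i] // iterating directly avoids capturing `sum`
  }
  return sum
}
```
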
From 6ffd22ffe98922994d2297c19b9824440005f472 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:47 -0500 Subject: [PATCH 0185/1534] New translations common-issues.mdx (Portuguese) --- .../pt/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/pt/subgraphs/developing/creating/graph-ts/common-issues.mdx index 01b21940efdb..2f5f5b63c40a 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Problemas Comuns no AssemblyScript É comum encontrar certos problemas no [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) durante o desenvolvimento do subgraph. Eles variam em dificuldade de debug, mas vale ter consciência deles. A seguir, uma lista não exaustiva destes problemas: -- Variáveis de classe `Private` não são aplicadas no [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). Não há como evitar que estas variáveis sejam alteradas diretamente a partir do objeto de classe. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - O escopo não é herdado em [funções de closure](https://www.assemblyscript.org/status.html#on-closures), por ex., não é possível usar variáveis declaradas fora de funções de closure. Há uma explicação [neste vídeo](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 7348f4bef73ac77f736b01800d27414c58b90f67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:48 -0500 Subject: [PATCH 0186/1534] New translations common-issues.mdx (Russian) --- .../ru/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ru/subgraphs/developing/creating/graph-ts/common-issues.mdx index a07a63600785..74f717af91a4 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Распространенные проблемы с AssemblyScript Существуют определенные проблемы c [AssemblyScript] (https://github.com/AssemblyScript/assemblyscript), с которыми часто приходится сталкиваться при разработке субграфа. Они различаются по сложности отладки, однако знание о них может помочь. Ниже приведен неполный перечень этих проблем: -- `Переменные класса `Private\` не применяются в [AssembyScript](https://www.assemblyscript.org/status.html#language-features). Невозможно защитить переменные класса от непосредственного изменения из объекта класса. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Область видимости не наследуется [функциями замыкания](https://www.assemblyscript.org/status.html#on-closures), т.е. переменные, объявленные вне функций замыкания, не могут быть использованы. Пояснения см. 
в [Рекомендациях для разработчиков #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 4627ae92cdc36446ee903343426aa26b3f65a981 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:49 -0500 Subject: [PATCH 0187/1534] New translations common-issues.mdx (Swedish) --- .../sv/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/sv/subgraphs/developing/creating/graph-ts/common-issues.mdx index 71cdf70beba8..b1f7b27f220a 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Vanliga problem med AssemblyScript There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 63ef508ca2dcbdd66b79d5ff6ab81e25c13cb012 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:50 -0500 Subject: [PATCH 0188/1534] New translations common-issues.mdx (Turkish) --- .../tr/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/tr/subgraphs/developing/creating/graph-ts/common-issues.mdx index c0debe317d00..681a0a3c6b31 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Genel AssemblyScript Sorunları Subgraph geliştirme sırasında karşılaşılması muhtemel bazı [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) sorunları bulunmaktadır. Bu sorunlar, hata ayıklama zorluğuna göre değişiklik gösterse de bunların farkında olmak faydalı olabilir. Aşağıda, bu sorunların kapsamlı olmayan bir listesi verilmiştir: -- [AssemblyScript'te](https://www.assemblyscript.org/status.html#language-features) `private` sınıf değişkenleri zorunlu tutulmaz. Sınıf nesnesi üzerinden sınıf değişkenlerinin doğrudan değiştirilmesini engellemenin bir yolu yoktur. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. 
- Kapsam [Closure fonksiyonlarına] aktarılmaz (https://www.assemblyscript.org/status.html#on-closures) kalıtılmaz, yani closure fonksiyonlarının dışında tanımlanan değişkenler bu fonksiyonlar içinde kullanılamaz. Daha fazla açıklama için [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s) videosuna bakabilirsiniz. From 8065a8902ee05db0a92c3399b96a7c64f3fae92b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:51 -0500 Subject: [PATCH 0189/1534] New translations common-issues.mdx (Ukrainian) --- .../uk/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/uk/subgraphs/developing/creating/graph-ts/common-issues.mdx index 5b99efa8f493..f8d0c9c004c2 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Common AssemblyScript Issues There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From f6bcdac66f0e40de4422bca382fbe01655a70176 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:52 -0500 Subject: [PATCH 0190/1534] New translations common-issues.mdx (Chinese Simplified) --- .../zh/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/zh/subgraphs/developing/creating/graph-ts/common-issues.mdx index 7eb8c874b075..d8625f05baea 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: AssemblyScript的常见问题 在子图开发过程中,常常会遇到某些 AssemblyScript 问题。它们在调试难度范围内,但是,意识到它们可能会有所帮助。以下是这些问题的非详尽清单: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. 
variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 9b36d07b09ecbbdd917c9ed5b49f004827805046 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:53 -0500 Subject: [PATCH 0191/1534] New translations common-issues.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/ur/subgraphs/developing/creating/graph-ts/common-issues.mdx index 3496f69d86ba..4b7eaae1c362 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: مشترکہ اسمبلی اسکرپٹ کے مسائل There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 755a0b153b5aff85481a4a154c4c0900be2b7be4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:54 -0500 Subject: [PATCH 0192/1534] New translations common-issues.mdx (Vietnamese) --- .../vi/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/vi/subgraphs/developing/creating/graph-ts/common-issues.mdx index 5b99efa8f493..f8d0c9c004c2 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: Common AssemblyScript Issues There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. 
variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 197df8c4066e95be3e90d371e04516da0233106e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:55 -0500 Subject: [PATCH 0193/1534] New translations common-issues.mdx (Marathi) --- .../mr/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/mr/subgraphs/developing/creating/graph-ts/common-issues.mdx index 1f9a2b687cc4..d291033f3ff0 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: सामान्य असेंब्लीस्क्रिप्ट There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 2d91cef852de53cb7a7302aaa8c048a4229eb296 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:56 -0500 Subject: [PATCH 0194/1534] New translations common-issues.mdx (Hindi) --- .../hi/subgraphs/developing/creating/graph-ts/common-issues.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/graph-ts/common-issues.mdx b/website/src/pages/hi/subgraphs/developing/creating/graph-ts/common-issues.mdx index baa35e2d653c..155469a5960b 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/graph-ts/common-issues.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/graph-ts/common-issues.mdx @@ -4,5 +4,5 @@ title: आम AssemblyScript मुद्दे There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: -- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. - Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. 
variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). From 948fcbb0dfa49d0b61393ddfea3996fc9a464707 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:57 -0500 Subject: [PATCH 0195/1534] New translations unit-testing-framework.mdx (Romanian) --- .../creating/unit-testing-framework.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ro/subgraphs/developing/creating/unit-testing-framework.mdx index 591b55feda38..2133c1d4b5c9 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick @@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... 
@@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` From 48be372672c842cc8ce10e2dcac5e3a82d40cf7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:58 -0500 Subject: [PATCH 0196/1534] New translations unit-testing-framework.mdx (French) --- .../creating/unit-testing-framework.mdx | 445 +++++++++--------- 1 file changed, 229 insertions(+), 216 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/fr/subgraphs/developing/creating/unit-testing-framework.mdx index 0a63c492dac1..c6fabb9f2d8e 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,52 +2,52 @@ title: Cadre pour les tests unitaires --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Apprenez à utiliser Matchstick, un framework de test unitaire développé par [LimeChain](https://limechain.tech/). Matchstick permet aux développeurs de subgraphs de tester leur logique de mappages dans un environnement sandbox et de déployer avec succès leurs subgraphs. -## Benefits of Using Matchstick +## Avantages de l'utilisation de Matchstick -- It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. +- Il est écrit en Rust et optimisé pour des hautes performances. 
+- Il vous donne accès à des fonctionnalités pour développeurs, y compris la possibilité de simuler des appels de contrat, de faire des assertions sur l'état du store, de surveiller les échecs de subgraph, de vérifier les performances des tests, et bien plus encore. -## Démarrage +## Introduction -### Install Dependencies +### Installation des dépendances -In order to use the test helper methods and run tests, you need to install the following dependencies: +Pour utiliser les méthodes d'aide aux tests et exécuter les tests, vous devez installer les dépendances suivantes : ```sh yarn add --dev matchstick-as ``` -### Install PostgreSQL +### Installer PostgreSQL -`graph-node` depends on PostgreSQL, so if you don't already have it, then you will need to install it. +`graph-node` dépend de PostgreSQL, donc si vous ne l'avez pas déjà, vous devrez l'installer. -> Note: It's highly recommended to use the commands below to avoid unexpected errors. +> Remarque : Il est fortement recommandé d'utiliser les commandes ci-dessous pour éviter les erreurs inattendues. -#### Using MacOS +#### En utilisant MacOS -Installation command: +Commande d'installation : ```sh brew install postgresql ``` -Créez un lien symbolique vers la dernière libpq.5.lib _Vous devrez peut-être d'abord créer ce répertoire_ `/usr/local/opt/postgresql/lib/` +Créez un lien symbolique vers la dernière version de libpq.5.lib _Vous devrez peut-être créer ce répertoire d'abord_ `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib ``` -#### Using Linux +#### En utilisant Linux -Installation command (depends on your distro): +Commande d'installation (dépend de votre distribution) : ```sh sudo apt installer postgresql ``` -### Using WSL (Windows Subsystem for Linux) +### En utilisant WSL (Windows Subsystem for Linux) Vous pouvez utiliser Matchstick sur WSL en utilisant à la fois l'approche Docker et l'approche binaire. Comme WSL peut être un peu délicat, voici quelques conseils au cas où vous rencontreriez des problèmes tels que @@ -61,13 +61,13 @@ ou bien /node_modules/gluegun/build/index.js:13 throw up; ``` -Veuillez vous assurer que vous utilisez une version plus récente de Node.js graph-cli qui ne prend plus en charge la **v10.19.0**, et qu'il s'agit toujours de la version par défaut pour le nouvel Ubuntu. images sur WSL. Par exemple, il est confirmé que Matchstick fonctionne sur WSL avec **v18.1.0**, vous pouvez y accéder soit via **nvm** ou si vous mettez à jour votre Node.js global. N'oubliez pas de supprimer `node_modules` et d'exécuter à nouveau `npm install` après avoir mis à jour votre nodejs ! Ensuite, assurez-vous que **libpq** est installé, vous pouvez le faire en exécutant +Veuillez vous assurer que vous êtes sur une version plus récente de Node.js graph-cli ne prend plus en charge **v10.19.0**, et c'est toujours la version par défaut pour les nouvelles images Ubuntu sur le WSL. Par exemple, il est confirmé que Matchstick fonctionne sur WSL avec **v18.1.0**, vous pouvez passer à cette version via **nvm** ou si vous mettez à jour votre Node.js global. N'oubliez pas de supprimer `node_modules` et de relancer `npm install` après avoir mis à jour votre nodejs ! 
Ensuite, assurez-vous que **libpq** est installé, vous pouvez le faire en exécutant ``` sudo apt-get install libpq-dev ``` -Et en conclussion, n'utilisez pas `graph test` (qui utilise votre installation globale de graph-cli et pour une raison quelconque, cela semble être cassé sur WSL actuellement), utilisez plutôt `yarn test` ou `npm run test` (cela utilisera l'instance locale de graph-cli au niveau du projet, qui fonctionne à merveille). Pour cela, vous devez bien sûr avoir un script `"test"` dans votre fichier `package.json` qui peut être quelque chose d'aussi simple que +Et enfin, n'utilisez pas `graph test` (qui utilise votre installation globale de graph-cli et pour une raison quelconque, il semble qu'il soit cassé sur WSL actuellement), utilisez plutôt `yarn test` ou `npm run test` (qui utilisera l'instance locale, au niveau du projet, de graph-cli, ce qui fonctionne comme un charme). Pour cela, vous devez bien sûr avoir un script `" test "` dans votre fichier `package.json` qui peut être quelque chose d'aussi simple que ```json { @@ -85,9 +85,9 @@ Et en conclussion, n'utilisez pas `graph test` (qui utilise votre installation g } ``` -### Using Matchstick +### En utilisant Matchstick -Pour utiliser **Matchstick** dans votre projet de subgraph, il suffit d'ouvrir un terminal, de naviguer vers le dossier racine de votre projet et d'exécuter simplement `graph test [options] ` - il télécharge le dernier binaire **Matchstick** et exécute le test spécifié ou tous les tests dans un dossier de test (ou tous les tests existants si aucun datasource flag n'est spécifié). +Pour utiliser **Matchstick** dans votre projet de ssubgraph, ouvrez un terminal, naviguez jusqu'au dossier racine de votre projet et exécutez simplement `graph test [options] ` - il télécharge le dernier binaire **Matchstick** et exécute le test spécifié ou tous les tests dans un dossier de test (ou tous les tests existants si aucun flag de source de données n'est spécifié). ### CLI options @@ -109,35 +109,35 @@ Ce fichier de test sera le seul à être exécuté : graph test path/to/file.test.ts ``` -**Les Options :** +**Options:** ```sh --c, --coverage Exécuter les tests en mode couverture --d, --docker Exécutez les tests dans un conteneur Docker (Remarque : veuillez exécuter à partir du dossier racine du subgraph) --f, --force Binary : télécharge à nouveau le binaire. Docker : télécharge à nouveau le fichier Docker et reconstruit l'image Docker. --h, --help Afficher les informations d'utilisation --l, --logs Enregistre dans la console des informations sur le système d'exploitation, le modèle de processeur et l'URL de téléchargement (à des fins de débogage) --r, --recompile Force les tests à être recompilés --v, --version Choisissez la version du binaire Rust que vous souhaitez télécharger/utiliser +-c, --coverage Exécuter les tests en mode couverture +-d, --docker Exécuter les tests dans un conteneur docker (Note : Veuillez exécuter à partir du dossier racine du subgraph) +-f, --force Binaire : Retélécharge le binaire. Docker : Retélécharge le fichier Docker et reconstruit l'image Docker. +-h, --help Affiche les informations d'utilisation +-l, --logs Enregistre dans la console des informations sur le système d'exploitation, le modèle de processeur et l'URL de téléchargement (à des fins de débogage). 
+-r, --recompile Force les tests à être recompilés +-v, --version Choisissez la version du binaire rust que vous souhaitez télécharger/utiliser ``` ### Docker -À partir de `graph-cli 0.25.2`, la commande `graph test` prend en charge l'exécution de `matchstick` dans un conteneur Docker avec le `-d drapeau. L'implémentation de Docker utilise bind mount afin de ne pas avoir à reconstruire l'image Docker à chaque fois que la commande graph test -d` est exécutée. Vous pouvez également suivre les instructions du référentiel [matchstick](https://github.com/LimeChain/matchstick#docker-) pour exécuter Docker manuellement. +Depuis `graph-cli 0.25.2`, la commande `graph test` supporte l'exécution de `matchstick` dans un conteneur docker avec l'option `-d`. L'implémentation de docker utilise [bind mount](https://docs.docker.com/storage/bind-mounts/) pour ne pas avoir à reconstruire l'image de docker à chaque fois que la commande `graph test -d` est exécutée. Vous pouvez également suivre les instructions du dépôt [matchstick](https://github.com/LimeChain/matchstick#docker-) pour exécuter docker manuellement. -❗ `graph test -d` force `docker run` à s'exécuter avec le paramètre `-t`. Cela doit être supprimé pour s'exécuter dans des environnements non interactifs (comme GitHub CI). +❗ `graph test -d` force `docker run` à s'exécuter avec le flag `-t`. Ceci doit être supprimé pour fonctionner dans des environnements non-interactifs (comme GitHub CI). -❗ En cas d'exécution préalable de `graph test`, vous risquez de rencontrer l'erreur suivante lors de la construction de docker : +❗ Si vous avez précédemment exécuté `graph test`, vous pouvez rencontrer l'erreur suivante lors du build de docker : ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -Dans ce cas, il faut créer un `.dockerignore` dans le dossier racine et ajoutez `node_modules/binary-install-raw/bin` +Dans ce cas, créez un `.dockerignore` dans le dossier racine et ajoutez `node_modules/binary-install-raw/bin` ### La Configuration -Matchstick peut être configuré pour utiliser des tests personnalisés, des bibliothèques et un chemin de manifeste via le fichier de configuration `matchstick.yaml` : +Matchstick peut être configuré pour utiliser un chemin personnalisé pour les tests, les librairies et les manifestes via le fichier de configuration `matchstick.yaml` : ```yaml testsFolder: path/to/tests @@ -147,23 +147,23 @@ manifestPath: path/to/subgraph.yaml ### Subgraph démonstration -Vous pouvez tester et jouer avec les exemples de ce guide en clonant le repo [Demo Subgraph](https://github.com/LimeChain/demo-subgraph) +Vous pouvez essayer et jouer avec les exemples de ce guide en clonant le [dépôt du Demo Subgraph.](https://github.com/LimeChain/demo-subgraph) ### Tutoriels vidéos -Vous pouvez également consulter la série de vidéos sur [« comment utiliser Matchstick pour écrire des tests unitaires pour vos subgraphs »](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Vous pouvez également consulter la série de vidéos sur [" Comment utiliser Matchstick pour écrire des tests unitaires pour vos subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Structure des tests -_**IMPORTANT: La structure de test décrite ci-dessous dépend de la version `matchstick-as` version >=0.5.0**_ +_**IMPORTANT : La structure de test décrite ci-dessous dépend de la version de `matchstick-as` >=0.5.0**_ ### décrivez() -`describe(name: String , () => {})` - 
Définit un groupe de test. +`describe(name : String , () => {})` - Définit un groupe de test. -**_Notez :_** +**_Notes:_** -- _Les descriptions ne sont pas indispensable. Vous pouvez toujours utiliser test() à l'ancienne, en dehors des blocs describe()_ +- _Les descriptions ne sont pas obligatoires. Vous pouvez toujours utiliser test() à l'ancienne, en dehors des blocs describe()_ L'exemple: @@ -172,13 +172,13 @@ import { describe, test } from "matchstick-as/assembly/index" import { handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar()", () => { - test("Devrait créer une nouvelle entité Gravatar", () => { + test("Il faut créer une nouvelle entité Gravatar", () => { ... }) }) ``` -Exemple de `décrire ()` imbriqué : +Exemple imbriqué de `describe()` : ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -192,7 +192,7 @@ describe("handleUpdatedGravatar()", () => { }) describe("Lorsque l'entité n'existe pas", () => { - test("il crée une nouvelle entitéy", () => { + test("il crée une nouvelle entité", () => { ... }) }) @@ -203,7 +203,7 @@ describe("handleUpdatedGravatar()", () => { ### tester () -`test(name: String, () =>, Should_fail: bool)` - Définit un scénario de test. Vous pouvez utiliser test() à l’intérieur des blocs décrire() ou indépendamment. +`test(name : String, () =>, should_fail : bool)` - Définit un cas de test. Vous pouvez utiliser test() à l'intérieur des blocs describe() ou indépendamment. L'exemple: @@ -212,7 +212,7 @@ import { describe, test } from "matchstick-as/assembly/index" import { handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar()", () => { - test("Devrait créer une nouvelle entité", () => { + test("Doit créer une nouvelle entité", () => { ... }) }) @@ -221,7 +221,7 @@ describe("handleNewGravatar()", () => { ou bien ```typescript -test("handleNewGravatar() doit créer une nouvelle entité", () => { +test("handleNewGravatar() devrait créer une nouvelle entité", () => { ... }) @@ -232,11 +232,11 @@ test("handleNewGravatar() doit créer une nouvelle entité", () => { ### avantTout() -Exécute un bloc de code après tous les tests du fichier. Si `afterAll` est déclaré à l'intérieur d'un bloc `describe`, il est exécuté à la fin de ce bloc `describe`. +Exécute un bloc de code avant tous les tests du fichier. Si `beforeAll` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute au début de ce bloc `describe`. Les Exemples: -Le code contenu dans `beforeAll` sera exécuté une fois avant les tests _all_ du fichier. +Le code contenu dans `beforeAll` s'exécutera une fois avant _tous_ les tests du fichier. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -251,13 +251,13 @@ beforeAll(() => { }) describe("Lorsque l'entité n'existe pas", () => { - test("il devrait créer un nouveau Gravatar avec l'id 0x1", () => { + test("il devrait créer un nouveau Gravatar avec l'identifiant 0x1", () => { ... }) }) describe("Lorsque l'entité existe déjà", () => { - test("il devrait mettre à jour le Gravatar avec l'id 0x0", () => { + test("il devrait mettre à jour le Gravatar avec l'identifiant 0x0", () => { ... }) }) @@ -273,7 +273,7 @@ import { Gravatar } from "../../generated/schema" describe("handleUpdatedGravatar()", () => { beforeAll(() => { let gravatar = new Gravatar("0x0") - gravatar.displayName = "Premier Gravatar" + gravatar.displayName = “First Gravatar” gravatar.save() ... }) @@ -282,7 +282,7 @@ describe("handleUpdatedGravatar()", () => { ... 
}) - test("crée un nouveau Gravatar avec l'identifiant 0x1", () => ; { + test("crée un nouveau Gravatar avec l'identifiant 0x1", () => { ... }) }) @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Lance un bloc de code après tous les tests du fichier. Si `afterAll` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute à la fin de ce bloc `describe`. +Exécute un bloc de code après tous les tests du fichier. Si `afterAll` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute à la fin de ce bloc `describe`. L'exemple: -Le code situé dans `afterAll` sera exécuté une fois après _all_ tests dans le fichier. +Le code contenu dans `afterAll` sera exécuté une fois après _tous_ les tests du fichier. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -309,41 +309,41 @@ afterAll(() => { }) describe("handleNewGravatar, () => { - test("crée un Gravatar avec l'identifiant 0x0", () => { + test("creates Gravatar with id 0x0", () => { ... }) }) describe("handleUpdatedGravatar", () => { - test("met à jour Gravatar avec l'identifiant 0x0", () => { + test("updates Gravatar with id 0x0", () => { ... }) }) ``` -Le code à l'intérieur de `afterAll` s'exécute une fois après tous les tests du premier bloc de description +Le code contenu dans `afterAll` sera exécuté une fois après tous les tests du premier bloc de description ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) - test("Crée une nouvelle entité avec l'identifiant 0x0", () => { + test("Il crée une nouvelle entité avec l'identifiant 0x0.", () => { ... }) - test("Crée une nouvelle entité avec l'identifiant 0x1", () => { + test("Il crée une nouvelle entité avec l'identifiant 0x1", () => { ... }) }) describe("handleUpdatedGravatar", () => { - test("Met à jour le Gravatar avec l'identifiant 0x0", () => { + test("updates Gravatar with id 0x0", () => { ... }) }) @@ -353,24 +353,24 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Lance un bloc de code avant chaque test. Si `beforeEach` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute avant chaque test de ce bloc `describe`. +Exécute un bloc de code avant chaque test. Si `beforeEach` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute avant chaque test dans ce bloc `describe`. -Exemples : Le code contenu dans `beforeEach` s'exécute avant chaque test. +Exemples : Le code contenu dans `beforeEach` sera exécuté avant chaque test. ```typescript -importez { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" +import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" import { handleNewGravatars } from "./utils" beforeEach(() => { - clearStore() // <-- clear the store before each test in the file + clearStore() // <-- nettoye le store avant chaque test dans le fichier }) describe("handleNewGravatars, () => { - test("A test that requires a clean store", () => { + test("Un test qui nécessite un store propre", () => { ... }) - test("Second that requires a clean store", () => { + test("Second, qui nécessite un store propre", () => { ... }) }) @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... ``` -Exécutez un bloc de code avant chaque test. 
Si `beforeEach` est déclaré à l'intérieur d'un bloc describe, il s'exécute avant chaque test de ce bloc describe +Le code contenu dans `beforeEach` ne s'exécutera qu'avant chaque test de la description ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,19 +392,19 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Met à jour le nom d'affichage (displayName)', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - // code qui devrait mettre à jour le nom d'affichage à 1 Gravatar + // code qui devrait mettre à jour le nom d'affichage (displayName) pour le 1er Gravatar assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') store.remove('Gravatar', '0x0') }) - test('Updates the imageUrl', () => { + test('Met à jour l'imageUrl', () => { assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - // code qui devrait changer le imageUrl en https://www.gravatar.com/avatar/0x0 + // code qui devrait changer l'imageUrl en https://www.gravatar.com/avatar/0x0 assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') store.remove('Gravatar', '0x0') @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Lance un bloc de code après chaque test. Si `afterEach` est déclaré à l'intérieur d'un bloc de `description`, il s'exécute après chaque test de ce bloc de `description`. +Exécute un bloc de code après chaque test. Si `afterEach` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute après chaque test dans ce bloc `describe`. Les Exemples: -Le code dans `afterEach` sera exécuté après chaque test. +Le code contenu dans `afterEach` sera exécuté après chaque test. ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,25 +441,25 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Met à jour le nom d'affichage (displayName)", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // code qui devrait mettre à jour le nom d'affichage à 1 Gravatar + // code qui devrait mettre à jour le nom d'affichage (displayName) pour le 1er Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) - test("Updates the imageUrl", () => { + test("Met à jour l'imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // code qui devrait changer le imageUrl en https://www.gravatar.com/avatar/0x0 + // code qui devrait changer l'imageUrl en https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -Le code contenu dans `afterEach` exécutera après chaque test dans cette description +Le code contenu dans `afterEach` sera exécuté après chaque test de cette description ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,18 +481,18 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Met à jour le nom d'affichage (displayName)", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // code qui devrait mettre à jour le nom d'affichage à 1 Gravatar + // code qui devrait mettre à jour le nom d'affichage (displayName) pour le 1er Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) - 
test("Updates the imageUrl", () => { + test("Met à jour l'imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // code qui devrait changer le imageUrl en https://www.gravatar.com/avatar/0x0 + // code qui devrait changer l'imageUrl en https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) @@ -536,36 +536,36 @@ entityCount(entityType: string, expectedCount: i32) À partir de la version 0.6.0, les assertions supportent également les messages d'erreur personnalisés ```typescript -assert.fieldEquals('Gravatar', '0x123', 'id', '0x123', 'L'Id doit être 0x123') +assert.fieldEquals('Gravatar', '0x123', 'id', '0x123', 'Id doit être 0x123') assert.equals(ethereum.Value.fromI32(1), ethereum.Value.fromI32(1), 'La valeur doit être égale à 1') -assert.notInStore('Gravatar', '0x124', 'Gravatar ne doit pas être dans le magasin') +assert.notInStore('Gravatar', '0x124', 'Gravatar ne devrait pas être dans le store') assert.addressEquals(Address.zero(), Address.zero(), 'L'adresse doit être zéro') assert.bytesEquals(Bytes.fromUTF8('0x123'), Bytes.fromUTF8('0x123'), 'Les Bytes doivent être égaux') assert.i32Equals(2, 2, 'I32 doit être égal à 2') assert.bigIntEquals(BigInt.fromI32(1), BigInt.fromI32(1), 'BigInt doit être égal à 1') assert.booleanEquals(true, true, 'Le booléen doit être vrai') -assert.stringEquals('1', '1', 'La chaîne de caractère doit être égale à 1') +assert.stringEquals('1', '1', 'La Chaîne de caractère doit être égale à 1') assert.arrayEquals([ethereum.Value.fromI32(1)], [ethereum.Value.fromI32(1)], 'Les tableaux doivent être égaux') assert.tupleEquals( changetype([ethereum.Value.fromI32(1)]), changetype([ethereum.Value.fromI32(1)]), 'Les tuples doivent être égaux', ) -assert.assertTrue(true, 'Doit être vrai') -assert.assertNull(null, 'Doit être nul') -assert.assertNotNull('pas nul', 'Ne doit pas être nul') +assert.assertTrue(true, 'Devrait être vrai') +assert.assertNull(null, 'Devrait être null') +assert.assertNotNull('not null', 'Doit être non null') assert.entityCount('Gravatar', 1, 'Il devrait y avoir 2 gravatars') -assert.dataSourceCount('GraphTokenLockWallet', 1, 'Le modèle(template) GraphTokenLockWallet doit avoir une source de données') +assert.dataSourceCount('GraphTokenLockWallet', 1, 'Le modèle GraphTokenLockWallet doit avoir une source de données') assert.dataSourceExists( 'GraphTokenLockWallet', - Address.zero().toHexString(), - 'GraphTokenLockWallet doit avoir une source de données pour l'adresse zéro', + Adresse.zero().toHexString(), + 'GraphTokenLockWallet doit avoir une source de données pour zéro adresse', ) ``` ## Écrire un test unitaire -Voyons à quoi ressemblerait un test unitaire simple en utilisant les exemples Gravatar dans le [subgraph de démonstration](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Voyons à quoi ressemblerait un test unitaire simple en utilisant les exemples de Gravatar dans le [Subgraph de Démo](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). 
En supposant que nous disposions de la fonction de traitement suivante (ainsi que de deux fonctions d'aide pour nous faciliter la vie) : @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -626,49 +626,49 @@ import { Gravatar } from '../../generated/schema' import { NewGravatar } from '../../generated/Gravity/Gravity' import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' -test('Peut appeler des mappings avec des événements personnalisés', () => { - // Crée une entité de test et l'enregistre dans le store comme état initial (optionnel) +test('Possibilité d'appeler des mappages avec des événements personnalisés', () => { + // Créer une entité de test et la sauvegarder dans le store en tant qu'état initial (optionnel) let gravatar = new Gravatar('gravatarId0') gravatar.save() - // Crée des événements factices + // Créer des événements fictifs let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - // Appelle les fonctions de mapping en passant les événements qu'on vient de créer + // Appeler les fonctions de mappage en passant par les événements que nous venons de créer handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - // Vérifie l'état du store + // Affirmer l'état du store assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - // Vide le store afin de commencer le prochain test avec un état propre + // Effacer le store afin de commencer le prochain test sur une ardoise propre clearStore() }) -test('Test suivant', () => { +test('Next test', () => { //... }) ``` -Cela fait beaucoup à décortiquer ! Tout d'abord, une chose importante à noter est que nous importons des choses de `matchstick-as`, notre bibliothèque d'aide AssemblyScript (distribuée en tant que module npm). Vous pouvez trouver le dépôt [ici](https://github.com/LimeChain/matchstick-as). `matchstick-as` nous fournit des méthodes de test utiles et définit également la fonction `test()` que nous utiliserons pour construire nos blocs de test. Le reste est assez simple - voici ce qui se passe : +Cela fait beaucoup à décortiquer ! Tout d'abord, une chose importante à noter est que nous importons des choses à partir de `matchstick-as`, notre bibliothèque d'aide AssemblyScript (distribuée comme un module npm). Vous pouvez trouver le dépôt [ici](https://github.com/LimeChain/matchstick-as). `matchstick-as` nous fournit des méthodes de test utiles et définit également la fonction `test()` que nous utiliserons pour construire nos blocs de test. 
Le reste est assez simple - voici ce qui se passe : - Mettons en place notre état initial et ajoutons une entité Gravatar personnalisée ; -- Définissons deux objets événement `NewGravatar` avec leurs données, en utilisant la fonction `createNewGravatarEvent()` ; -- Appelons des méthodes de gestion pour ces événements - `handleNewGravatars()` et nous passons la liste de nos événements personnalisés ; +- Nous définissons deux objets d'événement `NewGravatar` avec leurs données, en utilisant la fonction `createNewGravatarEvent()` ; +- Nous appelons les méthodes de gestion de ces événements - `handleNewGravatars()` et nous passons la liste de nos événements personnalisés ; - Affirmons l'état du magasin. Comment cela fonctionne-t-il ? - Nous passons une combinaison unique de type d'entité et d'identifiant. Ensuite, nous vérifions un champ spécifique de cette entité et affirmons qu'il a la valeur que nous attendons. Nous faisons cela à la fois pour l'entité Gravatar initiale que nous avons ajoutée au magasin, ainsi que pour les deux entités Gravatar qui sont ajoutées lorsque la fonction de gestion est appelée ; -- Et enfin, Nettoyons le magasin à l'aide de `clearStore()` afin que notre prochain test puisse commencer avec un objet magasin frais et vide. Nous pouvons définir autant de blocs de test que nous le souhaitons. +- Enfin, nous nettoyons le store en utilisant `clearStore()` afin que notre prochain test puisse commencer avec un objet de store frais et vide. Nous pouvons définir autant de blocs de test que nous le souhaitons. Et voilà, nous avons formulé notre premier test ! 👏 Maintenant, afin d'exécuter nos tests, il suffit d'exécuter ce qui suit dans le dossier racine de votre subgraph : -`gravity graph test` +`graph test Gravity` Et si tout se passe bien, vous devriez être accueilli par ce qui suit : -![Matchstick indiquant « Tous les tests sont réussis ! »](/img/matchstick-tests-passed.png) +![Matchstick avec le message “Tous les tests sont réussis!”](/img/matchstick-tests-passed.png) ## Scénarios de tests actuels @@ -754,18 +754,18 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri ### Se moquer des fichiers IPFS (à partir de Matchstick 0.4.1) -Les utilisateurs peuvent simuler les fichiers IPFS en utilisant la fonction `mockIpfsFile(hash, filePath)`. La fonction accepte deux arguments, le premier est le hachage/chemin du fichier IPFS et le second est le chemin d'accès à un fichier local. +Les utilisateurs peuvent simuler des fichiers IPFS en utilisant la fonction `mockIpfsFile(hash, filePath)`. La fonction accepte deux arguments, le premier étant le hash/chemin du fichier IPFS et le second le chemin d'un fichier local. -NOTEZ : Lorsque vous testez `ipfs.map/ipfs.mapJSON`, la fonction de rappel doit être exportée du fichier de test afin que matchstck puisse la détecter, comme la fonction `processGravatar()` dans l'exemple de test ci-dessous : +NOTE : Lorsque l'on teste `ipfs.map/ipfs.mapJSON`, la fonction callback doit être exportée depuis le fichier de test afin que matchstck la détecte, comme la fonction `processGravatar()` dans l'exemple de test ci-dessous : -Fichier `.test.ts` : +Ficher `.test.ts` : ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' import { ipfs } from '@graphprotocol/graph-ts' import { gravatarFromIpfs } from './utils' -// Exportation du callback ipfs.map() pour que matchstck le détecte. 
+// Exporter le callback ipfs.map() pour que matchstck le détecte export { processGravatar } from './utils' test('ipfs.cat', () => { @@ -795,7 +795,7 @@ test('ipfs.map', () => { }) ``` -Fichier `utils.ts` : +Fichier `utils.ts` : ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -857,11 +857,11 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -L'exécution de la fonction assert.fieldEquals() vérifiera l'égalité du champ donné par rapport à la valeur attendue indiquée. Le test échouera et un message d'erreur sera généré si les valeurs sont **NON** égales. Sinon, le test réussira. +L'exécution de la fonction assert.fieldEquals() permet de vérifier l'égalité du champ donné par rapport à la valeur attendue donnée. Le test échouera et un message d'erreur sera affiché si les valeurs **NE SONT PAS** égales. Dans le cas contraire, le test passera avec succès. ### Interagir avec les métadonnées d'événement -Les utilisateurs peuvent utiliser les métadonnées de transaction par défaut, qui peuvent être renvoyées comme un ethereum. Event en utilisant la fonction `newMockEvent()`. L'exemple suivant montre comment vous pouvez lire/écrire dans ces champs sur l'objet Event : +Les utilisateurs peuvent utiliser les métadonnées de transaction par défaut, qui peuvent être reenvoyées comme un ethereum.Event en utilisant la fonction `newMockEvent()`. L'exemple suivant montre comment vous pouvez lire/écrire dans ces champs de l'objet Event : ```typescript // Lisez @@ -878,7 +878,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) assert.equals(ethereum.Value.fromString("bonjour"); ethereum.Value.fromString("bonjour")); ``` -### Affirmez qu'une entité **n'existe pas** dans le magasin +### Affirmer qu'une entité n'est **PAS** dans le store Les utilisateurs peuvent affirmer qu'une entité n'existe pas dans le magasin. La fonction prend un type d'entité et un identifiant. Si l'entité se trouve effectivement dans le magasin, le test échouera avec un message d'erreur pertinent. Voici un exemple rapide de la façon d'utiliser cette fonctionnalité : @@ -896,7 +896,7 @@ import { logStore } from 'matchstick-as/assembly/store' logStore() ``` -À partir de la version 0.6.0, `logStore` n'affiche plus les champs dérivés, les utilisateurs peuvent utiliser la nouvelle fonction `logEntity`. Bien sûr, `logEntity` peut être utilisé pour afficher n'importe quelle entité, pas seulement celles qui ont des champs dérivés. `logEntity` prend le type d'entité, l'Id de l'entité et un paramètre `showRelated` pour indiquer si les utilisateurs veulent afficher les entités dérivées liées. +Depuis la version 0.6.0, `logStore` n'affiche plus les champs dérivés, au lieu de celà les utilisateurs peuvent utiliser la nouvelle fonction `logEntity`. Bien sûr, `logEntity` peut être utilisée pour afficher n'importe quelle entité, pas seulement celles qui ont des champs dérivés. `logEntity` prend le type d'entité, l'identifiant de l'entité et un flag `showRelated` pour indiquer si les utilisateurs veulent afficher les entités dérivées associées. 
``` import { logEntity } from 'matchstick-as/assembly/store' @@ -911,11 +911,11 @@ Les utilisateurs peuvent s'attendre à des échecs de test, en utilisant l'indic ```typescript test( - 'Devrait générer une erreur', - () => { - lancer une nouvelle erreur() - }, - vrai, + 'Devrait générer une erreur', + () => { + throw new Error() + }, + true, ) ``` @@ -930,19 +930,19 @@ import { test } from "matchstick-as/assembly/index"; import { log } from "matchstick-as/assembly/log"; test("Success", () => { - log.success("Success!". []); + log.success("Succès!". []); }); test("Error", () => { - log.error("Error :( ", []); + log.error("Erreur :( ", []); }); test("Debug", () => { - log.debug("Debugging...", []); + log.debug("Deboggage...", []); }); test("Info", () => { log.info("Info!", []); }); test("Warning", () => { - log.warning("Warning!", []); + log.warning("Avertissement!", []); }); ``` @@ -960,14 +960,14 @@ La journalisation des erreurs critiques arrêtera l’exécution des tests et fe Tester les champs dérivés est une fonctionnalité qui permet aux utilisateurs de définir un champ sur une certaine entité et de faire en sorte qu'une autre entité soit automatiquement mise à jour si elle dérive l'un de ses champs de la première entité. -Avant la version `0.6.0`, il était possible d'obtenir les entités dérivées en les accédant comme des champs/propriétés d'entité, comme ceci : +Avant la version `0.6.0`, il était possible d'obtenir les entités dérivées en y accédant en tant que champs/propriétés d'entité, comme suit : ```typescript let entity = ExampleEntity.load('id') let derivedEntity = entity.derived_entity ``` -À partir de la version `0.6.0`, cela se fait en utilisant la fonction `loadRelated` de graph-node, les entités dérivées peuvent être accessibles de la même manière que dans les gestionnaires. +A partir de la version `0.6.0`, ceci est fait en utilisant la fonction `loadRelated` de graph-node, les entités dérivées peuvent être accédées de la même manière que dans les gestionnaires. ```typescript test('Derived fields example test', () => { @@ -1011,7 +1011,7 @@ test('Derived fields example test', () => { ### Test de `loadInBlock` -Depuis la version `0.6.0`, les utilisateurs peuvent tester `loadInBlock` en utilisant `mockInBlockStore`, ce qui permet de simuler des entités dans le cache du bloc. +Depuis la version `0.6.0`, les utilisateurs peuvent tester `loadInBlock` en utilisant le `mockInBlockStore`, qui permet de simuler des entités dans le cache du bloc. 
```typescript
import { afterAll, beforeAll, describe, mockInBlockStore, test } from 'matchstick-as'
@@ -1026,12 +1026,12 @@ describe('loadInBlock', () => {
    clearInBlockStore()
  })

-  test('On peut utiliser entity.loadInBlock() pour récupérer l'entité dans le cache du bloc actuel', () => {
+  test("Peut utiliser entity.loadInBlock() pour récupérer l'entité dans le store du cache du bloc actuel", () => {
    let retrievedGravatar = Gravatar.loadInBlock('gravatarId0')
    assert.stringEquals('gravatarId0', retrievedGravatar!.get('id')!.toString())
  })

-  test("Renvoie null lors de l'appel de entity.loadInBlock() si une entité n'existe pas dans le bloc actuel", () => {
+  test("Renvoie null lors de l'appel à entity.loadInBlock() si une entité n'existe pas dans le bloc actuel", () => {
    let retrievedGravatar = Gravatar.loadInBlock('IDoNotExist')
    assert.assertNull(retrievedGravatar)
  })
@@ -1040,56 +1040,56 @@

### Tester les sources de données dynamiques

-Le test des sources de données dynamiques peut être effectué en simulant la valeur de retour des fonctions `context()`, `address()` et `network()` du Espace de noms dataSource. Ces fonctions renvoient actuellement les éléments suivants : `context()` - renvoie une entité vide (DataSourceContext), `address()` - renvoie `0x000000000000000000000000000000000000000000`, ` network()` - renvoie `mainnet`. Les fonctions `create(...)` et `createWithContext(...)` sont simulées pour ne rien faire, elles n'ont donc pas du tout besoin d'être appelées dans les tests. Les modifications des valeurs de retour peuvent être effectuées via les fonctions de l'espace de noms `dataSourceMock` dans `matchstick-as` (version 0.3.0+).
+Le test des sources de données dynamiques peut être effectué en simulant la valeur de retour des fonctions `context()`, `address()` et `network()` du namespace dataSource. Ces fonctions renvoient actuellement les valeurs suivantes : `context()` - renvoie une entité vide (DataSourceContext), `address()` - renvoie `0x0000000000000000000000000000000000000000`, `network()` - renvoie `mainnet`. Les fonctions `create(...)` et `createWithContext(...)` sont simulées pour ne rien faire, donc elles n'ont pas besoin d'être appelées dans les tests. Les modifications des valeurs de retour peuvent être faites à travers les fonctions du namespace `dataSourceMock` dans `matchstick-as` (version 0.3.0+).

L'exemple ci-dessous :

Nous avons d’abord le gestionnaire d’événements suivant (qui a été intentionnellement réutilisé pour présenter la moquerie de la source de données) :

```typescript
-fonction d'exportation handleApproveTokenDestinations (événement : ApproveTokenDestinations) : void {
-  laissez tokenLockWallet = TokenLockWallet.load(dataSource.address().toHexString()) !
-  if (dataSource.network() == 'rinkeby') {
-    tokenLockWallet.tokenDestinationsApproved = true
-  }
-  laissez contexte = dataSource.context()
-  if (context.get('contextVal')!.toI32() > 0) {
-    tokenLockWallet.setBigInt('tokensReleased', BigInt.fromI32(context.get('contextVal')!.toI32()))
-  }
-  tokenLockWallet.save()
+export function handleApproveTokenDestinations(event: ApproveTokenDestinations): void {
+  let tokenLockWallet = TokenLockWallet.load(dataSource.address().toHexString())!
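+  // NB (note de l'éditeur, ajout) : le « ! » suppose que l'entité TokenLockWallet
+  // existe déjà dans le store — le test plus bas la crée donc d'abord via wallet.save().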
+  if (dataSource.network() == 'rinkeby') {
+    tokenLockWallet.tokenDestinationsApproved = true
+  }
+  let context = dataSource.context()
+  if (context.get('contextVal')!.toI32() > 0) {
+    tokenLockWallet.setBigInt('tokensReleased', BigInt.fromI32(context.get('contextVal')!.toI32()))
+  }
+  tokenLockWallet.save()
}
```

Et puis nous avons le test utilisant l'une des méthodes de l'espace de noms dataSourceMock pour définir une nouvelle valeur de retour pour toutes les fonctions dataSource :

```typescript
-importer { assert, test, newMockEvent, dataSourceMock } depuis 'matchstick-as/assembly/index'
-importer { BigInt, DataSourceContext, Value } depuis '@graphprotocol/graph-ts'
+import { assert, test, newMockEvent, dataSourceMock } from 'matchstick-as/assembly/index'
+import { BigInt, DataSourceContext, Value } from '@graphprotocol/graph-ts'

-importer { handleApproveTokenDestinations } depuis '../../src/token-lock-wallet'
-importer { ApproveTokenDestinations } depuis '../../generated/templates/GraphTokenLockWallet/GraphTokenLockWallet'
-importer { TokenLockWallet } depuis '../../generated/schema'
+import { handleApproveTokenDestinations } from '../../src/token-lock-wallet'
+import { ApproveTokenDestinations } from '../../generated/templates/GraphTokenLockWallet/GraphTokenLockWallet'
+import { TokenLockWallet } from '../../generated/schema'

-test('Exemple moqueur simple de source de données', () => {
-  laissez adresseString = '0xA16081F360e3847006dB660bae1c6d1b2e17eC2A'
-  let adresse = Adresse.fromString(addressString)
+test('Source de données : simple exemple de simulation', () => {
+  let addressString = '0xA16081F360e3847006dB660bae1c6d1b2e17eC2A'
+  let address = Address.fromString(addressString)

-  laissez wallet = new TokenLockWallet (address.toHexString())
-  portefeuille.save()
-  laisser contexte = new DataSourceContext()
-  contexte.set('contextVal', Value.fromI32(325))
-  dataSourceMock.setReturnValues(addressString, 'rinkeby', contexte)
-  let event = changetype(newMockEvent())
+  let wallet = new TokenLockWallet(address.toHexString())
+  wallet.save()
+  let context = new DataSourceContext()
+  context.set('contextVal', Value.fromI32(325))
+  dataSourceMock.setReturnValues(addressString, 'rinkeby', context)
+  let event = changetype<ApproveTokenDestinations>(newMockEvent())

-  assert.assertTrue(!wallet.tokenDestinationsApproved)
+  assert.assertTrue(!wallet.tokenDestinationsApproved)

-  handleApproveTokenDestinations (événement)
+  handleApproveTokenDestinations(event)

-  portefeuille = TokenLockWallet.load(address.toHexString()) !
-  assert.assertTrue(wallet.tokenDestinationsApproved)
-  assert.bigIntEquals(wallet.tokensReleased, BigInt.fromI32(325))
+  wallet = TokenLockWallet.load(address.toHexString())!
+  assert.assertTrue(wallet.tokenDestinationsApproved)
+  assert.bigIntEquals(wallet.tokensReleased, BigInt.fromI32(325))

-  dataSourceMock.resetValues()
+  dataSourceMock.resetValues()
})
```

@@ -1097,37 +1097,44 @@ Notez que dataSourceMock.resetValues() est appelé à la fin. C'est parce que le

### Test de la création dynamique de sources de données

-Depuis la version `0.6.0`, il est possible de tester si une nouvelle source de données a été créée à partir d'un modèle. Cette fonctionnalité prend en charge les modèles ethereum/contrat et file/ipfs. Il existe quatre fonctions pour cela :
+Depuis la version `0.6.0`, il est possible de tester si une nouvelle source de données a été créée à partir d'un modèle. Cette fonctionnalité prend en charge à la fois les modèles ethereum/contract et file/ipfs.
Il y a quatre fonctions pour cela :

-- `assert.dataSourceCount(templateName, expectedCount)` peut être utilisée pour affirmer le nombre attendu de sources de données à partir du modèle spécifié
+- `assert.dataSourceCount(templateName, expectedCount)` peut être utilisé pour affirmer le nombre attendu de sources de données à partir du modèle spécifié
- `assert.dataSourceExists(templateName, address/ipfsHash)` affirme qu'une source de données avec l'identifiant spécifié (qui peut être une adresse de contrat ou un hash de fichier IPFS) a été créée à partir d'un modèle spécifié
-- `logDataSources(templateName)` affiche toutes les sources de données à partir du modèle spécifié dans la console à des fins de débogage
-- `readFile(path)` lit un fichier JSON qui représente un fichier IPFS et retourne le contenu sous forme de Bytes
+- `logDataSources(templateName)` affiche toutes les sources de données du modèle spécifié sur la console à des fins de débogage
+- `readFile(path)` lit un fichier JSON qui représente un fichier IPFS et renvoie le contenu sous forme de Bytes

-#### Test des modèles `ethereum/contract`
+#### Test des modèles `ethereum/contract`

```typescript
test('ethereum/contract dataSource creation example', () => {
-  // affirme qu'aucune source de données n'est créée à partir du modèle GraphTokenLockWallet
+  // Affirmer qu'il n'y a pas de dataSources créées à partir du modèle GraphTokenLockWallet
  assert.dataSourceCount('GraphTokenLockWallet', 0)
-  // Crée une nouvelle source de données GraphTokenLockWallet avec l'adresse 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A
+
+  // Créer une nouvelle source de données GraphTokenLockWallet avec l'adresse 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A
  GraphTokenLockWallet.create(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2A'))
-  // affirme que la source de données a été créée
+
+  // Affirmer que la source de données a été créée
  assert.dataSourceCount('GraphTokenLockWallet', 1)
-  // Ajoute une seconde source de données avec contexte
+
+  // Ajouter une deuxième source de données avec le contexte
  let context = new DataSourceContext()
  context.set('contextVal', Value.fromI32(325))
+
  GraphTokenLockWallet.createWithContext(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'), context)
-  // Vérifie qu'il y a maintenant 2 sources de données
+
+  // Affirmer qu'il y a maintenant 2 sources de données
  assert.dataSourceCount('GraphTokenLockWallet', 2)
-  // affirme qu'une source de données avec l'adresse "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" a été créée
-  // Gardez à l'esprit que le type `Address` est transformé en minuscules lors du décodage, vous devez donc passer l'adresse en minuscules lorsque vous affirmez son existence
+
+  // Affirme qu'une source de données avec l'adresse "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" a été créée.
+  // Gardez à l'esprit que le type `Address` est transformé en minuscules lorsqu'il est décodé, vous devez donc passer l'adresse en minuscules lorsque vous affirmez qu'elle existe.
assert.dataSourceExists('GraphTokenLockWallet', '0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'.toLowerCase()) + logDataSources('GraphTokenLockWallet') }) ``` -##### Exemple de sortie de `logDataSource` +##### Exemple de sortie `logDataSource` ```bash 🛠 { @@ -1152,12 +1159,12 @@ test('ethereum/contract dataSource creation example', () => { #### Test des modèles `file/ipfs` -De même que les sources de données dynamiques de contrat, les utilisateurs peuvent tester les fichiers sources de données test et leurs gestionnaires +De même que pour les sources de données dynamiques de contrat, les utilisateurs peuvent tester les fichiers sources de données et leurs gestionnaires ##### Exemple `subgraph.yaml` ```yaml ---- +... templates: - kind: file/ipfs name: GraphTokenLockMetadata @@ -1175,7 +1182,7 @@ templates: file: ./abis/GraphTokenLockWallet.json ``` -##### Exemple de fichier `schema.graphql` +##### Exemple `schema.graphql` ```graphql """ @@ -1195,7 +1202,7 @@ type TokenLockMetadata @entity { } ``` -##### Exemple de fichier `metadata.json` +##### Exemple `metadata.json` ```json { @@ -1209,23 +1216,26 @@ type TokenLockMetadata @entity { ##### Exemple de gestionnaire ```typescript -export function handleMetadata(content: Bytes): void { - // dataSource.stringParams() renvoie le CID du fichier de source de donnée +export function handleMetadata(content : Bytes) : void { + // dataSource.stringParams() renvoie le CID du fichier de la source de données // stringParam() sera simulé dans le test du gestionnaire // pour plus d'informations https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files let tokenMetadata = new TokenLockMetadata(dataSource.stringParam()) const value = json.fromBytes(content).toObject() + if (value) { const startTime = value.get('startTime') const endTime = value.get('endTime') const periods = value.get('periods') const releaseStartTime = value.get('releaseStartTime') + if (startTime && endTime && periods && releaseStartTime) { tokenMetadata.startTime = startTime.toBigInt() tokenMetadata.endTime = endTime.toBigInt() tokenMetadata.periods = periods.toBigInt() tokenMetadata.releaseStartTime = releaseStartTime.toBigInt() } + tokenMetadata.save() } } @@ -1236,63 +1246,66 @@ export function handleMetadata(content: Bytes): void { ```typescript import { assert, test, dataSourceMock, readFile } from 'matchstick-as' import { Address, BigInt, Bytes, DataSourceContext, ipfs, json, store, Value } from '@graphprotocol/graph-ts' + import { handleMetadata } from '../../src/token-lock-wallet' import { TokenLockMetadata } from '../../generated/schema' import { GraphTokenLockMetadata } from '../../generated/templates' -test('exemple de création de source de données file/ipfs', () => { - // Générer le CID de la source de données à partir du ipfsHash + chemin du fichier du ipfs +test('exemple de création d'une dataSource file/ipfs', () => { + // Générer le CID de la source de données à partir du fichier ipfsHash + chemin ipfs // Par exemple QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' const CID = `${ipfshash}/example.json` - // Création d'une nouvelle source de données en utilisant le CID généré + // Créer une nouvelle dataSource en utilisant le CID généré GraphTokenLockMetadata.create(CID) - // Affirmer que la source de données a été créée + // Affirmer que la dataSource a été créée assert.dataSourceCount('GraphTokenLockMetadata', 1) 
assert.dataSourceExists('GraphTokenLockMetadata', CID) logDataSources('GraphTokenLockMetadata') - // Maintenant, nous devons simuler les métadonnées de la source de données et plus particulièrement dataSource.stringParam() - // dataSource.stringParams utilise en fait la valeur de dataSource.address(), donc nous allons simuler l'adresse en utilisant dataSourceMock de matchstick-as - // Tout d'abord, nous allons réinitialiser les valeurs et ensuite utiliser dataSourceMock.setAddress() pour définir le CID + // Nous devons maintenant simuler les métadonnées de la dataSource et plus particulièrement dataSource.stringParam() + // dataSource.stringParams utilise en fait la valeur de dataSource.address(), nous allons donc simuler l'adresse en utilisant dataSourceMock de matchstick-as + // Nous allons d'abord réinitialiser les valeurs, puis utiliser dataSourceMock.setAddress() pour définir le CID. dataSourceMock.resetValues() dataSourceMock.setAddress(CID) - // Maintenant, nous devons générer les Bytes à passer au gestionnaire de la source de données - // Pour ce cas, nous avons introduit une nouvelle fonction readFile, qui lit un json local et renvoie le contenu sous forme de Bytes - const content = readFile('path/to/metadata.json') + + // Nous devons maintenant générer les octets à transmettre au gestionnaire de la dataSource. + // Pour ce cas, nous avons introduit une nouvelle fonction readFile, qui lit un json local et renvoie le contenu sous forme d'octets + const content = readFile(`path/to/metadata.json`) handleMetadata(content) - // Maintenant, nous allons tester si un TokenLockMetadata a été créé + // Maintenant nous allons tester si un TokenLockMetadata a été créé const metadata = TokenLockMetadata.load(CID) - assert.bigIntEquals(metadata!.endTime, BigInt.fromI32(1)) - assert.bigIntEquals(metadata!.periods, BigInt.fromI32(1)) - assert.bigIntEquals(metadata!.releaseStartTime, BigInt.fromI32(1)) - assert.bigIntEquals(metadata!.startTime, BigInt.fromI32(1)) + + assert.bigIntEquals(metadata !.endTime, BigInt.fromI32(1)) + assert.bigIntEquals(metadata !.periods, BigInt.fromI32(1)) + assert.bigIntEquals(metadata !.releaseStartTime, BigInt.fromI32(1)) + assert.bigIntEquals(metadata !.startTime, BigInt.fromI32(1)) }) ``` ## Couverture de test -Grâce à **Matchstick**, les développeurs de subgraphs peuvent exécuter un script qui calculera la couverture des tests unitaires écrits. +En utilisant **Matchstick**, les développeurs de subgraphs peuvent exécuter un script qui calculera la couverture des tests unitaires écrits. -L'outil de couverture de test prend les binaires de test `wasm` compilés et les convertit en fichiers `wat`, qui peuvent ensuite être facilement inspectés pour voir si les gestionnaires définis dans `subgraph .yaml` ont été appelés. Étant donné que la couverture du code (et les tests dans leur ensemble) en sont à leurs tout premiers stades dans AssemblyScript et WebAssembly, **Matchstick** ne peut pas vérifier la couverture des branches. Au lieu de cela, nous nous appuyons sur l'affirmation selon laquelle si un gestionnaire donné a été appelé, l'événement/la fonction correspondant a été correctement simulé. +L'outil de couverture des tests prend les binaires de test compilés `wasm` et les convertit en fichiers `wat`, qui peuvent alors être facilement inspectés pour voir si les gestionnaires définis dans `subgraph.yaml` ont été appelés ou non. 
Comme la couverture du code (et les tests dans leur ensemble) n'en est qu'à ses débuts en AssemblyScript et WebAssembly, **Matchstick** ne peut pas vérifier la couverture des branches. Au lieu de cela, nous nous appuyons sur l'affirmation que si un gestionnaire donné a été appelé, l'événement/la fonction correspondant(e) a été correctement simulé(e). -### Conditions préalables +### Prerequisites -Pour exécuter la fonctionnalité de couverture de test fournie dans **Matchstick**, vous devez préparer quelques éléments au préalable : +Pour utiliser la fonctionnalité de couverture des tests fournie dans **Matchstick**, il y a quelques éléments à préparer à l'avance : #### Exportez vos gestionnaires -Pour que **Matchstick** vérifie quels gestionnaires sont exécutés, ces gestionnaires doivent être exportés à partir du **fichier de test**. Ainsi, par exemple, dans notre exemple, dans notre fichier gravitation.test.ts, nous avons le gestionnaire suivant en cours d'importation : +Pour que **Matchstick** puisse vérifier quels handlers sont exécutés, ces handlers doivent être exportés depuis le **fichier de test**. Ainsi, dans notre exemple, dans notre fichier gravity.test.ts, nous avons importé le gestionnaire suivant : ```typescript importez { handleNewGravatar } from '../../src/gravity' ``` -Pour que cette fonction soit visible (pour qu'elle soit incluse dans le fichier `wat` **par son nom**), nous devons également l'exporter, comme ceci : +Pour que cette fonction soit visible (pour qu'elle soit incluse dans le fichier `wat` **par nom**), nous devons également l'exporter, comme ceci : ```typescript exportez { handleNewGravatar } @@ -1306,20 +1319,20 @@ Une fois tout configuré, pour exécuter l'outil de couverture de test, exécute graph test -- -c ``` -Vous pouvez également ajouter une commande `coverage` personnalisée à votre fichier `package.json`, comme ceci : +Vous pouvez également ajouter une commande `coverage` personnalisée à votre fichier `package.json`, comme ceci : ```typescript "scripts": { - /.../ - "coverage": "test graph -- -c" - }, + /.../ + "coverage": "graph test -- -c" + }, ``` Cela exécutera l'outil de couverture et vous devriez voir quelque chose comme ceci dans le terminal : ```sh $ graph test -c -Sauter l'étape de téléchargement/installation car le binaire existe déjà à l'adresse suivante : /Users/petko/work/demo-subgraph/node_modules/binary-install-raw/bin/0.4.0 +Skipping download/install step because binary already exists at /Users/petko/work/demo-subgraph/node_modules/binary-install-raw/bin/0.4.0 ___ ___ _ _ _ _ _ | \/ | | | | | | | (_) | | @@ -1328,13 +1341,13 @@ ___ ___ _ _ _ _ _ | | | | (_| | || (__| | | \__ \ |_| | (__| < \_| |_/\__,_|\__\___|_| |_|___/\__|_|\___|_|\_\ -Compilation... +Compiling... -Exécution en mode rapport de couverture. +Running in coverage report mode. ️ -Lecture des modules de test générés... 🔎️ +Reading generated test modules... 🔎️ -Génération du rapport de couverture 📝 +Generating coverage report 📝 Handlers for source 'Gravity': Handler 'handleNewGravatar' is tested. @@ -1358,13 +1371,13 @@ Global test coverage: 22.2% (2/9 handlers). La sortie du journal inclut la durée de l’exécution du test. 
Voici un exemple : -`[Jeudi 31 mars 2022 13:54:54 +0300] Programme exécuté en : 42,270 ms.` +`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` ## Erreurs de compilation courantes > Critique : impossible de créer WasmInstance à partir d'un module valide avec un contexte : importation inconnue : wasi_snapshot_preview1::fd_write n'a pas été défini -Ceci signifie que vous avez utilisé `console.log` dans votre code, ce qui n'est pas pris en charge par AssemblyScript. Veuillez envisager d'utiliser l'[API de journalisation](/subgraphs/developing/creating/graph-ts/api/#logging-api) +Ceci signifie que vous avez utilisé `console.log` dans votre code, qui n'est pas pris en charge par AssemblyScript. Veuillez considérer l'utilisation de [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api) > ERREUR TS2554 : attendu ? arguments, mais j'ai eu ?. > @@ -1372,17 +1385,17 @@ Ceci signifie que vous avez utilisé `console.log` dans votre code, ce qui n'est > > dans ~lib/matchstick-as/assembly/defaults.ts(18,12) > -> ERROR TS2554: Expected ? arguments, but got ?. +> ERREUR TS2554 : attendu ? arguments, mais j'ai eu ?. > > renvoyer un nouveau ethereum.Transaction (defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt) ; > > dans ~lib/matchstick-as/assembly/defaults.ts(24,12) -L'inadéquation des arguments est causée par une inadéquation entre `graph-ts` et `matchstick-as`. La meilleure façon de résoudre des problèmes comme celui-ci est de tout mettre à jour vers la dernière version publiée. +La non-concordance des arguments est causée par la non-concordance de `graph-ts` et de `matchstick-as`. La meilleure façon de résoudre des problèmes comme celui-ci est de tout mettre à jour vers la dernière version publiée. -## Ressources additionnelles +## Ressources supplémentaires -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). +Pour toute aide supplémentaire, consultez cette [démo de subgraph utilisant Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). ## Réaction From b0b0727367203403547d2bce370157226c5acdf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:46:59 -0500 Subject: [PATCH 0197/1534] New translations unit-testing-framework.mdx (Spanish) --- .../creating/unit-testing-framework.mdx | 132 +++++++++--------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/es/subgraphs/developing/creating/unit-testing-framework.mdx index 750d57c0199f..a9ab2a9ef384 100644 --- a/website/src/pages/es/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: Marco de Unit Testing --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. 
## Benefits of Using Matchstick @@ -33,7 +33,7 @@ Installation command: brew install postgresql ``` -Crea un symlynk al último libpq.5.lib _Es posible que primero debas crear este directorio_ `/usr/local/opt/postgresql/lib/` +Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -61,13 +61,13 @@ o /node_modules/gluegun/build/index.js:13 throw up; ``` -Asegúrate de tener una versión más reciente de Node.js, graph-cli ya no es compatible con **v10.19.0**, y esa sigue siendo la versión predeterminada para las nuevas imágenes de Ubuntu en WSL. Por ejemplo, se ha confirmado que Matchstick funciona en WSL con **v18.1.0**, puedes cambiar a él a través de **nvm** o si actualizas su Node.js global. ¡No olvides eliminar `node_modules` y ejecutar `npm install` nuevamente después de actualizar sus nodejs! Luego, asegúrate de tener **libpq** instalado, puedes hacerlo ejecutando +Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! Then, make sure you have **libpq** installed, you can do that by running ``` sudo apt-get install libpq-dev ``` -Y finalmente, no uses `graph test` (que usa tu instalación global de graph-cli y por alguna razón parece que no funciona en WSL actualmente), en su lugar usa `yarn test` o `npm run test` (que usará la instancia local a nivel de proyecto de graph-cli, que funciona de maravilla). Para eso, por supuesto, necesitarías tener un script `"test"` en tu archivo `package.json` que puede ser algo tan simple como +And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as ```json { @@ -87,7 +87,7 @@ Y finalmente, no uses `graph test` (que usa tu instalación global de graph-cli ### Using Matchstick -Para usar **Matchstick** en tu proyecto de subgrafo simplemente abre una terminal, navega a la carpeta raíz de tu proyecto y simplemente ejecuta `graph test [options] `: descarga el binario **Matchstick** más reciente y ejecuta la prueba especificada o todas las pruebas en una carpeta de prueba (o todas las pruebas existentes si no se especifica un indicador de fuente de datos). +To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### Opciones CLI @@ -109,35 +109,35 @@ Esto ejecutará solo ese archivo de prueba específico: graph test path/to/file.test.ts ``` -**Opciones:** +**Options:** ```sh --c, --coverage: Ejecuta las pruebas en modo de cobertura. 
--d, --docker: Ejecuta las pruebas en un contenedor Docker (Nota: Por favor, ejecuta desde la carpeta raíz del subgrafo). --f, --force: Binario: Vuelve a descargar el binario. Docker: Vuelve a descargar el archivo Docker y reconstruye la imagen Docker. --h, --help: Muestra información de uso. --l, --logs: Registra en la consola información sobre el sistema operativo, modelo de CPU y URL de descarga (para fines de depuración). --r, --recompile: Fuerza a que las pruebas se recompilen. --v, --version : Elije la versión del binario de Rust que deseas descargar/utilizar +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### Docker -Desde `graph-cli 0.25.2`, el comando `graph test` admite la ejecución de `matchstick` en un contenedor de Docker con la marca `-d`. La implementación de Docker utiliza [bind-mount](https://docs.docker.com/storage/bind-mounts/) para que no tenga que reconstruir la imagen del Docker cada vez que se ejecuta el comando ` graph test -d`. Como alternativa, puedes seguir las instrucciones del repositorio [matchstick](https://github.com/LimeChain/matchstick#docker-) para ejecutar docker manualmente. +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. ❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). 
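A rough sketch of the difference (the `matchstick` image name and bind-mount layout below are assumptions — see the Matchstick repository's Docker instructions for the authoritative command):

```sh
# Interactive local run — note the TTY flag (-t) that `graph test -d` injects
docker run -it --rm --mount type=bind,source="$(pwd)",target=/matchstick matchstick

# Non-interactive (CI) run — the same command with the TTY flag dropped
docker run -i --rm --mount type=bind,source="$(pwd)",target=/matchstick matchstick
```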
-❗ Si ejecutaste previamente `graph test`, es posible que encuentres el siguiente error durante la compilación de docker: +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -En este caso, crea un `.dockerignore` en la carpeta raíz y agrega `node_modules/binary-install-raw/bin` +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### Configuración -Matchstick se puede configurar para usar pruebas personalizadas, bibliotecas y una ruta de manifesto personalizada a través del archivo de configuración `matchstick.yaml`: +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,11 +147,11 @@ manifestPath: path/to/subgraph.yaml ### Subgrafo de demostración -Puedes probar y jugar con los ejemplos de esta guía clonando el [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Tutoriales en vídeo -También puedes ver la serie de videos en ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -161,9 +161,9 @@ _**IMPORTANT: The test structure described below depens on `matchstick-as` versi `describe(name: String , () => {})` - Defines a test group. -**_Notas:_** +**_Notes:_** -- _Las descripciones no son obligatorias. Todavía puedes usar test() a la antigua usanza, fuera de los bloques describe()_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ Ejemplo: @@ -178,7 +178,7 @@ describe("handleNewGravatar()", () => { }) ``` -Ejemplo anidado de `describe()`: +Nested `describe()` example: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -203,7 +203,7 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - Define un caso de prueba. Puedes usar test() dentro de los bloques describe() o de forma independiente. +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. Ejemplo: @@ -232,11 +232,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Ejecuta un bloque de código antes de cualquiera de las pruebas del archivo. Si `beforeAll` se declara dentro de un bloque `describe`, se ejecuta al principio de ese bloque `describe`. +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. Ejemplos: -El código dentro de `afterAll` se ejecutará una vez después de _todas_ las pruebas en el archivo. +Code inside `beforeAll` will execute once before _all_ tests in the file. 
```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -263,7 +263,7 @@ describe("When entity already exists", () => { }) ``` -El código dentro de `beforeAll` se ejecutará una vez antes de todas las pruebas en el primer bloque describe +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Ejecuta un bloque de código después de todas las pruebas del archivo. Si `afterAll` se declara dentro de un bloque `describe`, se ejecuta al final de ese bloque `describe`. +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. Ejemplo: -El código dentro de `afterAll` se ejecutará una vez después de _all_ las pruebas en el archivo. +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -321,17 +321,17 @@ describe("handleUpdatedGravatar", () => { }) ``` -El código dentro de `afterAll` se ejecutará una vez después de todas las pruebas en el primer bloque `describe` +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -353,9 +353,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Ejecuta un bloque de código antes de cada prueba. Si `beforeEach` se declara dentro de un bloque `describe`, se ejecuta antes de cada prueba en ese bloque `describe`. +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -Ejemplos: el código dentro de `beforeEach` se ejecutará antes de cada prueba. +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... ``` -El código dentro de `beforeEach` se ejecutará solo antes de cada prueba en el describe +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Ejecuta un bloque de código después de cada prueba. Si `afterEach` se declara dentro de un bloque `describe`, se ejecuta después de cada prueba en ese bloque `describe`. +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. Ejemplos: -El código dentro de `afterEach` se ejecutará después de cada prueba. +Code inside `afterEach` will execute after every test. 
```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -459,7 +459,7 @@ describe("handleUpdatedGravatar", () => { }) ``` -El código dentro de `afterEach` se ejecutará después de cada prueba en ese describe +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -565,7 +565,7 @@ assert.dataSourceExists( ## Escribir un Unit Test -Veamos cómo se vería un unit test simple usando los ejemplos de Gravatar en el [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). Suponiendo que tenemos la siguiente función handler (junto con dos funciones auxiliares para hacernos la vida más fácil): @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -652,23 +652,23 @@ test('Next test', () => { }) ``` -¡Eso es mucho para desempacar! En primer lugar, algo importante a tener en cuenta es que estamos importando elementos de `matchstick-as`, nuestra biblioteca auxiliar de AssemblyScript (distribuida como un módulo npm). Puede encontrar el repositorio [aquí](https://github.com/LimeChain/matchstick-as). `matchstick-as` nos proporciona métodos de prueba útiles y también define la función `test()` que usaremos para construir nuestros bloques de prueba. El resto es bastante sencillo: esto es lo que sucede: +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. 
The rest of it is pretty straightforward - here's what happens: - Estamos configurando nuestro estado inicial y agregando una entidad Gravatar personalizada; -- Definimos dos objetos de evento `NewGravatar` junto con sus datos, usando la función `createNewGravatarEvent()`; -- Estamos llamando a los métodos handler para esos eventos - `handleNewGravatars()` y pasando la lista de nuestros eventos personalizados; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; - Hacemos valer el estado del almacén. ¿Cómo funciona eso? - Pasamos una combinación única de tipo de Entidad e id. A continuación, comprobamos un campo específico de esa Entidad y afirmamos que tiene el valor que esperamos que tenga. Hacemos esto tanto para la Entidad Gravatar inicial que añadimos al almacén, como para las dos entidades Gravatar que se añaden cuando se llama a la función del handler; -- Y, por último, estamos limpiando el store usando `clearStore()` para que nuestra próxima prueba pueda comenzar con un objeto store nuevo y vacío. Podemos definir tantos bloques de prueba como queramos. +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. Ahí vamos: ¡hemos creado nuestra primera prueba! 👏 Ahora, para ejecutar nuestras pruebas, simplemente necesitas ejecutar lo siguiente en la carpeta raíz de tu subgrafo: -`prueba graph Gravity` +`graph test Gravity` Y si todo va bien, deberías ser recibido con lo siguiente: -![Matchstick diciendo "¡Todas las pruebas pasaron!"](/img/matchstick-tests-passed.png) +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) ## Escenarios de prueba comunes @@ -754,11 +754,11 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri ### Mocking de archivos IPFS (desde matchstick 0.4.1) -Los usuarios pueden mock archivos IPFS usando la función `mockIpfsFile(hash, filePath)`. La función acepta dos argumentos, el primero es el hash/ruta del archivo IPFS y el segundo es la ruta a un archivo local. +Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. -NOTA: Al probar `ipfs.map/ipfs.mapJSON`, la función callback debe exportarse desde el archivo de prueba para que matchstck la detecte, como la función `processGravatar()` en el siguiente ejemplo de prueba: +NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: -archivo `.test.ts`: +`.test.ts` file: ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' @@ -795,7 +795,7 @@ test('ipfs.map', () => { }) ``` -archivo `utils.ts`: +`utils.ts` file: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -857,11 +857,11 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -Ejecutar la función assert.fieldEquals() verificará la igualdad del campo dado contra el valor esperado dado. La prueba fallará y se generará un mensaje de error si los valores **NO** son iguales. 
De lo contrario, la prueba pasará con éxito. +Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. ### Interactuar con metadatos de eventos -Los usuarios pueden usar metadatos de transacción predeterminados, que podrían devolverse como un evento de ethereum mediante el uso de la función `newMockEvent()`. El siguiente ejemplo muestra cómo puedes leer/escribir en esos campos en el objeto Event: +Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object: ```typescript // Read @@ -878,7 +878,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); ``` -### Afirmar que una Entidad **no** está en el almacenamiento +### Asserting that an Entity is **not** in the store Los usuarios pueden afirmar que una entidad no existe en el almacenamiento. La función toma un tipo de entidad y una identificación. Si la entidad está de hecho en el almacenamiento, la prueba fallará con un mensaje de error relevante. Aquí hay un ejemplo rápido de cómo usar esta funcionalidad: @@ -1040,7 +1040,7 @@ describe('loadInBlock', () => { ### Probar fuentes de datos dinámicas -La prueba de las fuentes de datos dinámicas se puede realizar simulando el valor de retorno de las funciones `context()`, `address()` y `network()` del dataSource namespace. Estas funciones actualmente devuelven lo siguiente: `context()` - devuelve una entidad vacía (DataSourceContext), `address()` - devuelve `0x00000000000000000000000000000000000000000`, ` network()` - devuelve `mainnet`. Las funciones `create(...)` y `createWithContext(...)` son mocked para no hacer nada, por lo que no es necesario llamarlas en las pruebas. Los cambios en los valores devueltos se pueden realizar a través de las funciones del espacio de nombres `dataSourceMock` en `matchstick-as` (versión 0.3.0+). +Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). Ejemplo a continuación: @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` @@ -1289,29 +1289,29 @@ test('file/ipfs dataSource creation example', () => { ## Cobertura de prueba -Usando **Matchstick**, los desarrolladores de subgrafos pueden ejecutar un script que calculará la cobertura de las pruebas unitarias escritas. 
+Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. -### Prerrequisitos +### Prerequisites -Para ejecutar la funcionalidad de cobertura de prueba proporcionada en **Matchstick**, hay algunas cosas que debe preparar de antemano: +To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: #### Exporta tus handlers -Para que **Matchstick** verifique qué handlers se están ejecutando, esos handlers deben exportarse desde el **archivo de prueba**. Entonces, por ejemplo, en nuestro archivo gravity.test.ts tenemos el siguiente handler que se está importando: +In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -Para que esa función sea visible (para que se incluya en el archivo `wat` **por nombre**) también debemos exportarla, como este: +In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: ```typescript export { handleNewGravatar } ``` -### Uso +### Usage Una vez que esté todo configurado, para ejecutar la herramienta de cobertura de prueba, simplemente ejecuta: @@ -1319,7 +1319,7 @@ Una vez que esté todo configurado, para ejecutar la herramienta de cobertura de graph test -- -c ``` -También puedes agregar un comando `coverage` personalizado a tu archivo `package.json`, así: +You could also add a custom `coverage` command to your `package.json` file, like so: ```typescript "scripts": { @@ -1371,7 +1371,7 @@ Global test coverage: 22.2% (2/9 handlers). La salida del log incluye la duración de la ejecución de la prueba. Aquí hay un ejemplo: -`[Thu, 31 Mar 2022 13:54:54 +0300] Programa ejecutado en: 42.270ms.` +`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` ## Errores comunes del compilador @@ -1391,7 +1391,7 @@ This means you have used `console.log` in your code, which is not supported by A > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -La falta de coincidencia en los argumentos se debe a la falta de coincidencia en `graph-ts` y `matchstick-as`. La mejor manera de solucionar problemas como este es actualizar todo a la última versión publicada. +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. 
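For example (a sketch only — the version ranges below are illustrative placeholders, not a verified pairing), keeping the two packages aligned in `package.json` and reinstalling usually resolves the error:

```json
{
  "devDependencies": {
    "@graphprotocol/graph-ts": "^0.35.0",
    "matchstick-as": "^0.6.0"
  }
}
```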
## Recursos Adicionales From 4e111c1b0a28835078db1ef57dc230ade2f43a88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:01 -0500 Subject: [PATCH 0198/1534] New translations unit-testing-framework.mdx (Arabic) --- .../creating/unit-testing-framework.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ar/subgraphs/developing/creating/unit-testing-framework.mdx index 854e4750cbce..e72d68bef7c8 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: اختبار وحدة Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick @@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` @@ -1293,7 +1293,7 @@ Using **Matchstick**, subgraph developers are able to run a script that will cal The test coverage tool takes the compiled test `wasm` binaries and converts them to 
`wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. -### المتطلبات الأساسية +### Prerequisites To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: @@ -1311,7 +1311,7 @@ In order for that function to be visible (for it to be included in the `wat` fil export { handleNewGravatar } ``` -### الاستخدام +### Usage Once that's all set up, to run the test coverage tool, simply run: From 41f109198bccea21dccaa2978b6d29c8ed845ade Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:02 -0500 Subject: [PATCH 0199/1534] New translations unit-testing-framework.mdx (Czech) --- .../creating/unit-testing-framework.mdx | 136 +++++++++--------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/cs/subgraphs/developing/creating/unit-testing-framework.mdx index e20ab984b5ca..fd0130dd672a 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: Rámec pro testování jednotek --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick @@ -33,7 +33,7 @@ Installation command: brew instalovat postgresql ``` -Vytvoření symlinku na nejnovější verzi libpq.5.lib _Musíte nejprve vytvořit tento adresář_ `/usr/local/opt/postgresql/lib/` +Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -61,13 +61,13 @@ nebo /node_modules/gluegun/build/index.js:13 throw up; ``` -Ujistěte se, že používáte novější verzi Node.js graph-cli již nepodporuje **v10.19.0** a tato verze je stále výchozí pro nové obrazy Ubuntu na WSL. Například je potvrzeno, že Matchstick funguje na WSL s **v18.1.0**, můžete na něj přepnout buď přes **nvm**, nebo pokud aktualizujete globální Node.js. Nezapomeňte smazat `node_modules` a po aktualizaci nodejs znovu spustit `npm install`! Poté se ujistěte, že máte nainstalovaný **libpq**, což můžete provést spuštěním příkazu +Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! 
Then, make sure you have **libpq** installed, you can do that by running ``` sudo apt-get install libpq-dev ``` -A konečně, nepoužívejte `graph test` (který používá globální instalaci graph-cli a z nějakého důvodu to vypadá, že je na WSL momentálně nefunkční), místo toho použijte `yarn test` nebo `npm run test` (které použijí lokální instanci graph-cli na úrovni projektu, což funguje jako kouzlo). K tomu byste samozřejmě potřebovali mít v souboru `package.json` skript `"test"`, což může být něco tak jednoduchého, jako např +And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as ```json { @@ -87,7 +87,7 @@ A konečně, nepoužívejte `graph test` (který používá globální instalaci ### Using Matchstick -Pro použití **Matchsticku** v projektu subgrafu stačí otevřít terminál, přejít do kořenové složky projektu a jednoduše spustit `graph test [options] ` - stáhne se nejnovější binární soubor **Matchsticku** a spustí se zadaný test nebo všechny testy ve složce testů (nebo všechny existující testy, pokud není zadán příznak datasource). +To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### Možnosti CLI @@ -109,35 +109,35 @@ Spustí se pouze tento konkrétní testovací soubor: graph test path/to/file.test.ts ``` -**Možnosti:** +**Options:** ```sh --c, --coverage Spustit testy v režimu pokrytí --d, --docker Spustit testy v kontejneru docker (Poznámka: Spusťte z kořenové složky podgrafu). --f, --force Binární: Znovu stáhne binární soubor. Docker: Znovu stáhne soubor dockeru a obnoví obraz dockeru. --h, --help Zobrazí informace o použití --l, --logs Zaznamená do konzole informace o operačním systému, modelu procesoru a url adrese pro stahování (pro účely ladění). --r, --recompile Vynutí překompilování testů. --v, --version Zvolte verzi binárního souboru rust, která se má stáhnout/použít +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### Docker -Od verze `graph-cli 0.25.2` podporuje příkaz `graph test` spuštění `matchstick` v kontejneru docker s příznakem `-d`. Implementace dockeru používá příkaz [bind mount](https://docs.docker.com/storage/bind-mounts/), takže nemusí při každém spuštění příkazu `graph test -d` obnovovat obraz dockeru. Případně můžete postupovat podle pokynů z repozitáře [matchstick](https://github.com/LimeChain/matchstick#docker-) a spustit docker ručně. +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. 
The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. ❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). -❗ Pokud jste dříve spustili `graph test`, můžete se při sestavování dockeru setkat s následující chybou: +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh - chyba odesílatele: nepodařilo se provést xattr node_modules/binary-install-raw/binary/binary-: oprávnění odepřeno + error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -V tomto případě vytvořte v kořenové složce `.dockerignore` a přidejte `node_modules/binary-install-raw/bin` +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### Konfigurace -Matchstick lze nakonfigurovat tak, aby používal vlastní testy, knihovny a cestu k manifestu prostřednictvím konfiguračního souboru `matchstick.yaml`: +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,11 +147,11 @@ manifestPath: path/to/subgraph.yaml ### Ukázkový podgraf -Příklady z této příručky si můžete vyzkoušet a pohrát si s nimi naklonováním repozitáře [Ukázkový podgraf](https://github.com/LimeChain/demo-subgraph) +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Videonávody -Můžete se také podívat na sérii videí ["Jak používat Matchstick k psaní unit test pro vaše podgrafy"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -159,11 +159,11 @@ _**IMPORTANT: The test structure described below depens on `matchstick-as` versi ### describe() -`describe(name: String , () => {})` - Definuje skupinu test. +`describe(name: String , () => {})` - Defines a test group. -**_Poznámky:_** +**_Notes:_** -- _Popisy nejsou povinné. Test() můžete stále používat starým způsobem, mimo bloky describe()_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ Příklad: @@ -178,7 +178,7 @@ describe("handleNewGravatar()", () => { }) ``` -Vnořený příklad `describe()`: +Nested `describe()` example: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -203,7 +203,7 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - Definuje případ test. Funkci test() můžete použít uvnitř bloků describe() nebo samostatně. +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. Příklad: @@ -232,11 +232,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Spustí blok kódu před kterýmkoli testem v souboru. Pokud je `beforeAll` deklarováno uvnitř bloku `describe`, spustí se na začátku tohoto bloku `describe`. 
+Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. Příklady: -Kód uvnitř `beforeAll` se provede jednou před _všemi_ testy v souboru. +Code inside `beforeAll` will execute once before _all_ tests in the file. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -263,7 +263,7 @@ describe("When entity already exists", () => { }) ``` -Kód uvnitř `beforeAll` se provede jednou před všemi testy v prvním bloku popisu +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Spustí blok kódu po všech test v souboru. Pokud je `afterAll` deklarováno uvnitř bloku `describe`, spustí se na konci tohoto bloku `describe`. +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. Příklad: -Kód uvnitř `afterAll` se provede jednou po _všech_ testech v souboru. +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -321,17 +321,17 @@ describe("handleUpdatedGravatar", () => { }) ``` -Kód uvnitř `afterAll` se provede jednou po všech test v prvním bloku popisu +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -353,9 +353,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Před každým test spustí blok kódu. Pokud je `beforeEach` deklarován uvnitř bloku `describe`, spustí se před každým test v tomto bloku `describe`. +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -Příklady: Příklady: Kód uvnitř `beforeEach` se provede před každým test. +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... ``` -Kód uvnitř `beforeEach` se provede pouze před každým test v tomto popisu +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Po každém test spustí blok kódu. Pokud je `afterEach` deklarován uvnitř bloku `describe`, spustí se po každém test v tomto bloku `describe`. +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. 
Příklady: -Kód uvnitř `afterEach` se provede po každém test. +Code inside `afterEach` will execute after every test. ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -459,7 +459,7 @@ describe("handleUpdatedGravatar", () => { }) ``` -Kód uvnitř `afterEach` se provede po každém test v tomto popisu +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -565,7 +565,7 @@ assert.dataSourceExists( ## Napsat jednotkový test -Podívejme se, jak by vypadal jednoduchý jednotkový test s použitím příkladů Gravatar v [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). Předpokládejme, že máme následující obslužnou funkci (spolu se dvěma pomocnými funkcemi, které nám usnadní život): @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -652,23 +652,23 @@ test('Next test', () => { }) ``` -To je spousta věcí, které je třeba vybalit! Nejprve je důležité si všimnout, že importovat věci z `matchstick-as`, naší pomocné knihovny AssemblyScript (distribuované jako modul npm). Repozitář najdete [zde](https://github.com/LimeChain/matchstick-as). Knihovna `matchstick-as` nám poskytuje užitečné testovací metody a také definuje funkci `test()`, kterou budeme používat k sestavování našich testovacích bloků. Zbytek je poměrně jednoduchý - zde je uvedeno, co se stane: +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. 
The rest of it is pretty straightforward - here's what happens: - Nastavujeme počáteční stav a přidáváme jednu vlastní entita Gravatar; -- Pomocí funkce `createNewGravatarEvent()` definujeme dva objekty událostí `NewGravatar` a jejich data; -- Voláme metody obsluhy těchto událostí - `handleNewGravatars()` a předáváme seznam našich vlastních událostí; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; - Ujišťujeme se o stavu obchodu. Jak to funguje? - Předáváme jedinečnou kombinaci typu Entity a id. Pak zkontrolujeme konkrétní pole této entity a potvrdíme, že má hodnotu, kterou očekáváme. Toto provádíme jak pro počáteční Entitu Gravatar, kterou jsme přidali do úložiště, tak pro dvě entity Gravatar, které se přidají při volání funkce obsluhy; -- A nakonec - vyčistíme úložiště pomocí `clearStore()`, aby náš další test mohl začít s novým a prázdným objektem úložiště. Můžeme definovat libovolný počet testovacích bloků. +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. A je to tady - vytvořili jsme první test! 👏 Pro spuštění našich testů nyní stačí v kořenové složce podgrafu spustit následující příkaz: -`test graf gravitace` +`graph test Gravity` A pokud vše proběhne v pořádku, měli byste se setkat s následujícím: -![Matchstick říká: "Všechny testy splněny!"](/img/matchstick-tests-passed.png) +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) ## Běžné testovací scénáře @@ -754,11 +754,11 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri ### Zesměšňování souborů IPFS (od matchstick 0.4.1) -Uživatelé mohou soubory IPFS napodobovat pomocí funkce `mockIpfsFile(hash, filePath)`. Funkce přijímá dva argumenty, prvním je hash/cesta k souboru IPFS a druhým je cesta k místnímu souboru. +Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. -POZNÁMKA: Při testování `ipfs.map/ipfs.mapJSON` musí být funkce zpětného volání exportována z testovacího souboru, aby ji matchstck detekoval, jako například funkci `processGravatar()` v testovacím příkladu níže: +NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: -`.test.ts` soubor +`.test.ts` file: ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' @@ -795,7 +795,7 @@ test('ipfs.map', () => { }) ``` -`utils.ts` soubor +`utils.ts` file: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -857,11 +857,11 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -Spuštěním funkce assert.fieldEquals() se zkontroluje rovnost zadaného pole se zadanou očekávanou hodnotou. Pokud se hodnoty **NENÍ** rovnají, test selže a vypíše se chybové hlášení. V opačném případě test úspěšně projde. +Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. 
The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. ### Interakce s metadaty událostí -Uživatelé mohou použít výchozí metadata transakce, která mohou být vrácena jako ethereum.Event pomocí funkce `newMockEvent()`. Následující příklad ukazuje, jak lze číst/zapisovat do těchto polí na objektu Event: +Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object: ```typescript // Read @@ -878,7 +878,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); ``` -### Tvrzení, že entita **není** v úložišti +### Asserting that an Entity is **not** in the store Uživatelé mohou tvrdit, že entita v úložišti neexistuje. Funkce přebírá typ entity a id. Pokud se entita v úložišti skutečně nachází, test selže s příslušným chybovým hlášením. Zde je rychlý příklad použití této funkce: @@ -1040,7 +1040,7 @@ describe('loadInBlock', () => { ### Testování dynamických zdrojů dat -Testování dynamických zdrojů dat lze provést pomocí posměchu návratové hodnoty funkcí `context()`, `address()` a `network()` oboru názvů dataSource. Tyto funkce v současné době vracejí následující hodnoty: `context()` - vrací prázdnou entitu (DataSourceContext), `address()` - vrací `0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000`, `network()` - vrací `mainnet`. a `createWithContext(...)` jsou zesměšněny tak, že nic nedělají, takže je v testech vůbec není třeba volat. Změny návratových hodnot lze provádět prostřednictvím funkcí jmenného prostoru `dataSourceMock` v `matchstick-as` (verze 0.3.0+). +Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). Příklad níže: @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` @@ -1289,29 +1289,29 @@ test('file/ipfs dataSource creation example', () => { ## Pokrytí test -Pomocí nástroje **Matchstick** mohou vývojáři podgrafů spustit skript, který vypočítá pokrytí testů napsaných jednotkových test. +Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. -Nástroj pro pokrytí test vezme zkompilované binární soubory `wasm` a převede je na soubory `wat`, které lze poté snadno zkontrolovat a zjistit, zda byly zavolány obslužné programy definované v souboru `subgraph.yaml`. 
Vzhledem k tomu, že pokrytí kódu (a testování jako celek) je v jazycích AssemblyScript a WebAssembly ve velmi rané fázi, nemůže **Matchstick** kontrolovat pokrytí větví. Místo toho se spoléháme na tvrzení, že pokud byla daná obslužná funkce zavolána, byly událost/funkce pro ni správně zesměšněny. +The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. -### Požadavky +### Prerequisites -Chcete-li spustit funkci pokrytí test poskytovanou v nástroji **Matchstick**, musíte si předem připravit několik věcí: +To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: #### Exportování zpracovatelů -Aby **Matchstick** mohl zkontrolovat, které obslužné programy jsou spuštěny, musí být tyto obslužné programy exportovány ze souboru **test**. Takže například v našem příkladu máme v souboru gravity.test.ts importován následující handler: +In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -Aby byla tato funkce viditelná (aby byla obsažena **podle názvu**), musíme ji také exportovat, například takto: +In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: ```typescript export { handleNewGravatar } ``` -### Použití +### Usage Jakmile je vše nastaveno, spusťte nástroj pro pokrytí test: @@ -1319,7 +1319,7 @@ Jakmile je vše nastaveno, spusťte nástroj pro pokrytí test: graph test -- -c ``` -Do souboru `package.json` můžete také přidat vlastní příkaz `coverage`, například takto: +You could also add a custom `coverage` command to your `package.json` file, like so: ```typescript "scripts": { @@ -1391,7 +1391,7 @@ This means you have used `console.log` in your code, which is not supported by A > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -Neshoda v argumentech je způsobena neshodou v `graph-ts` a `matchstick-as`. Nejlepší způsob, jak opravit problémy, jako je tento, je aktualizovat vše na nejnovější vydanou verzi. +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. 
## Další zdroje From 770b0462a3094cb5b3e857eb438c8ea9371b72e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:03 -0500 Subject: [PATCH 0200/1534] New translations unit-testing-framework.mdx (German) --- .../creating/unit-testing-framework.mdx | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/de/subgraphs/developing/creating/unit-testing-framework.mdx index 591b55feda38..52f7cc2134b8 100644 --- a/website/src/pages/de/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/unit-testing-framework.mdx @@ -1,15 +1,15 @@ --- -title: Unit Testing Framework +title: Rahmen für Einheitstests --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. - It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. -## Getting Started +## Erste Schritte ### Install Dependencies @@ -49,13 +49,13 @@ sudo apt install postgresql ### Using WSL (Windows Subsystem for Linux) -You can use Matchstick on WSL both using the Docker approach and the binary approach. As WSL can be a bit tricky, here's a few tips in case you encounter issues like +Sie können Matchstick auf WSL sowohl mit dem Docker-Ansatz als auch mit dem binären Ansatz verwenden. Da WSL ein wenig knifflig sein kann, hier ein paar Tipps, falls Sie auf Probleme stoßen wie ``` -static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = +static BYTES = Symbol(„Bytes“) SyntaxError: Unerwartetes Token = ``` -or +oder ``` /node_modules/gluegun/build/index.js:13 throw up; @@ -89,21 +89,21 @@ And finally, do not use `graph test` (which uses your global installation of gra To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). 
-### CLI options +### CLI-Optionen -This will run all tests in the test folder: +Dadurch werden alle Tests im Testordner ausgeführt: ```sh graph test ``` -This will run a test named gravity.test.ts and/or all test inside of a folder named gravity: +Dies führt einen Test namens gravity.test.ts und/oder alle Tests in einem Ordner namens gravity aus: ```sh graph test gravity ``` -This will run only that specific test file: +Dadurch wird nur diese spezielle Testdatei ausgeführt: ```sh graph test path/to/file.test.ts @@ -135,7 +135,7 @@ From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` -### Configuration +### Konfiguration Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: @@ -145,15 +145,15 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo-Subgraph You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) -### Video tutorials +### Video-Tutorials Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) -## Tests structure +## Struktur der Tests _**IMPORTANT: The test structure described below depens on `matchstick-as` version >=0.5.0**_ @@ -218,7 +218,7 @@ describe("handleNewGravatar()", () => { }) ``` -or +oder ```typescript test("handleNewGravatar() should create a new entity", () => { @@ -234,7 +234,7 @@ test("handleNewGravatar() should create a new entity", () => { Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. -Examples: +Beispiele: Code inside `beforeAll` will execute once before _all_ tests in the file. @@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -418,7 +418,7 @@ describe('handleUpdatedGravatars', () => { Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. -Examples: +Beispiele: Code inside `afterEach` will execute after every test. 
@@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -921,7 +921,7 @@ test( If the test is marked with shouldFail = true but DOES NOT fail, that will show up as an error in the logs and the test block will fail. Also, if it's marked with shouldFail = false (the default state), the test executor will crash. -### Logging +### Protokollierung Having custom logs in the unit tests is exactly the same as logging in the mappings. The difference is that the log object needs to be imported from matchstick-as rather than graph-ts. Here's a simple example with all non-critical log types: @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` @@ -1393,7 +1393,7 @@ This means you have used `console.log` in your code, which is not supported by A The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. -## Additional Resources +## Zusätzliche Ressourcen For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). From dbd925df7f27d661aceb75f4c0df6dbf73a8529d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:04 -0500 Subject: [PATCH 0201/1534] New translations unit-testing-framework.mdx (Italian) --- .../creating/unit-testing-framework.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/it/subgraphs/developing/creating/unit-testing-framework.mdx index 591b55feda38..77496e8eb092 100644 --- a/website/src/pages/it/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,14 +2,14 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). 
Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick - It's written in Rust and optimized for high performance. - It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. -## Getting Started +## Per cominciare ### Install Dependencies @@ -55,7 +55,7 @@ You can use Matchstick on WSL both using the Docker approach and the binary appr static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = ``` -or +oppure ``` /node_modules/gluegun/build/index.js:13 throw up; @@ -218,7 +218,7 @@ describe("handleNewGravatar()", () => { }) ``` -or +oppure ```typescript test("handleNewGravatar() should create a new entity", () => { @@ -234,7 +234,7 @@ test("handleNewGravatar() should create a new entity", () => { Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. -Examples: +Esempi: Code inside `beforeAll` will execute once before _all_ tests in the file. @@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -418,7 +418,7 @@ describe('handleUpdatedGravatars', () => { Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. -Examples: +Esempi: Code inside `afterEach` will execute after every test. 
@@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` From 5d0fb632e24b3b723afb123d399458e0294ee8c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:06 -0500 Subject: [PATCH 0202/1534] New translations unit-testing-framework.mdx (Japanese) --- .../creating/unit-testing-framework.mdx | 122 +++++++++--------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ja/subgraphs/developing/creating/unit-testing-framework.mdx index 25c03b18a75f..5a089a93aa50 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: ユニットテストフレームワーク --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick @@ -33,7 +33,7 @@ Installation command: postgresql をインストールします。 ``` -最新の libpq.5.lib へのシンボリック リンクを作成します _最初にこのディレクトリを作成する必要がある場合があります_ `/usr/local/opt/postgresql/lib/` +Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -61,13 +61,13 @@ static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = /node_modules/gluegun/build/index.js:13 throw up; ``` -Node.jsの新しいバージョンを使っていることを確認してください graph-cliはもう**v10.19.0** をサポートしておらず、これはまだWSL上の新しいUbuntuイメージのデフォルトバージョンになっています。例えばマッチスティックは**v18.1.0** でWSL上で動作することが確認されており、**nvm** を経由するか、グローバルNode.jsを更新すれば切り替えることができます。nodejsを更新したら、`node_modules`を削除し、`npm install`を再度実行するのを忘れないでください! 
それから、**libpq** がインストールされていることを確認してください。 +Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! Then, make sure you have **libpq** installed, you can do that by running ``` sudo apt-get install libpq-dev (インストール) ``` -最後に、`graph test` (グローバルにインストールされたgraph-cliを使用します。なぜかWSLでは壊れているようです)を使用せず、`yarn test` や `npm run test` (ローカル、プロジェクトレベルのgraph-cliを使用し、魅力的に動作します。)を使用するようにしてください。そのためには、もちろん `"test"` スクリプトを `package.json` ファイルに記述する必要がありますが、これは以下のような簡単なものです。 +And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as ```json { @@ -87,7 +87,7 @@ sudo apt-get install libpq-dev (インストール) ### Using Matchstick -**Matchstick** をサブグラフ・プロジェクトで使用するには、ターミナルを開き、プロジェクトのルート・フォルダに移動して、 `graph test [options] ` と実行するだけで、最新の **Matchstick** バイナリがダウンロードされて、テスト・フォルダにある指定したテストまたは全てのテストが実行されます (datasource flag が指定されていなければ既存の全てのテスト). +To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI options @@ -109,7 +109,7 @@ gravityのテスト graph test path/to/file.test.ts ``` -**オプション:** +**Options:** ```sh -c, --coverage Run the tests in coverage mode @@ -123,21 +123,21 @@ graph test path/to/file.test.ts ### Docker -`graph-cli 0.25.2` からは、`graph test` コマンドが `-d` フラグの付いた docker コンテナでの `matchstick` の実行をサポートしています。docker の実装では、[bind mount](https://docs.docker.com/storage/bind-mounts/) を使用しているので、`graph test -d` コマンドを実行するたびに docker イメージを再構築する必要はありません。また、[matchstick](https://github.com/LimeChain/matchstick#docker-) リポジトリの説明に従って、手動でDockerを実行することもできます。 +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. ❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). 
-❗ 以前に `graph test` を実行したことがある場合、docker build 中に以下のようなエラーが発生することがあります。 +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh - 送信者からのエラー: xattr node_modules/binary-install-raw/bin/binary- へのアクセスに失敗しました: パーミッションが拒否されました。 + error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -この場合、ルートフォルダに `.dockerignore` を作成し、 `node_modules/binary-install-raw/bin` を追加してください。 +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### コンフィギュレーション -Matchstick は、`matchstick.yaml` 設定ファイルによって、カスタムテスト、ライブラリ、マニフェストのパスを使用するように設定することができます。 +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,11 +147,11 @@ manifestPath: path/to/subgraph.yaml ### デモ・サブグラフ -[Demo Subgraph レポ](https://github.com/LimeChain/demo-subgraph)をクローンすることで、このガイドのサンプルを試したり、遊んだりすることができます。 +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### ビデオチュートリアル -また、[「Matchstickを使ってサブグラフのユニットテストを書く方法」](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h)のビデオシリーズもご覧ください。 +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -161,9 +161,9 @@ _**IMPORTANT: The test structure described below depens on `matchstick-as` versi `describe(name: String , () => {})` - Defines a test group. -**_注:_** +**_Notes:_** -- _ディスクリートは必須ではありません。describe() ブロックの外側で test() を旧来の方法で使用することができます。_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ 例: @@ -203,9 +203,9 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - テスト ケースを定義します。 test() は、describe() ブロック内または独立して使用できます +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. -例: +例: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -232,11 +232,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -ファイル中のどのテストよりも前にコードブロックを実行します。もし `beforeAll` が `describe` ブロックの中で宣言された場合、その `describe` ブロックの先頭で実行されます。 +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. 例 -`beforeAll` 内のコードは、ファイル内の _all_ テストの前に一度だけ実行されます。 +Code inside `beforeAll` will execute once before _all_ tests in the file. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -263,7 +263,7 @@ describe("When entity already exists", () => { }) ``` -`beforeAll` 内のコードは、最初の記述ブロックのすべてのテストの前に一度だけ実行されます。 +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -ファイル内の全てのテストの後にコードブロックを実行します。もし `afterAll` が `describe` ブロックの中で宣言された場合、その `describe` ブロックの最後で実行されます。 +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. 
-例: +例: -`afterAll` 内のコードは、ファイル内の _all_ テストの後に一度だけ実行されます。 +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -321,17 +321,17 @@ describe("handleUpdatedGravatar", () => { }) ``` -`afterAll`内のコードは、最初の記述ブロックのすべてのテストの後に一度だけ実行されます。 +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -353,9 +353,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -各テストの前にコードブロックを実行します。もし `beforeEach` が `describe` ブロックの中で宣言された場合、その `describe` ブロックの中の各テストの前に実行されます。 +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -例 `beforeEach` 内のコードは、各テストの前に実行されます。 +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... ``` -`beforeEach`内のコードは、その記述中の各テストの前にのみ実行されます。 +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -各テストの後にコードブロックを実行します。もし `afterEach` が `describe` ブロックの中で宣言されていれば、その `describe` ブロックの中の各テストの後に実行されます。 +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. -例: +例 -`afterEach`内のコードは、各テスト終了後に実行されます。 +Code inside `afterEach` will execute after every test. 
```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -459,7 +459,7 @@ describe("handleUpdatedGravatar", () => { }) ``` -`afterEach`内のコードは、その記述の各テストの後に実行されます。 +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -565,7 +565,7 @@ assert.dataSourceExists( ## 単体テストを書く -[Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts) にある Gravatar の例を使って、簡単なユニットテストがどのように見えるか見てみましょう。 +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). 次のようなハンドラ関数があるとします(さらに、生活を便利にするための2つのヘルパー関数もあります)。 @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -652,23 +652,23 @@ test('Next test', () => { }) ``` -このように様々な形で紐解いてみました。まず最初に、重要なことは、AssemblyScript のヘルパーライブラリである `matchstick-as` からインポートしていることです (npm モジュールとして配布されています)。リポジトリは[こちら](https://github.com/LimeChain/matchstick-as)にあります。`matchstick-as` は便利なテストメソッドを提供し、テストブロックを構築するために使用する `test()` 関数を定義しています。残りの部分はとても簡単で、次のようなことが起こります。 +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: - 初期状態を設定し、カスタムGravatarエンティティを1つ追加しています; -- `createNewGravatarEvent()` 関数を使用して、2 つの `NewGravatar` イベント オブジェクトとそれらのデータを定義します。 -- これらのイベントのハンドラメソッド - `handleNewGravatars()` を呼び出し、カスタムイベントのリストを渡しています; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; - storeの状態をアサートする場合、これはどのように行われるのでしょうか。- Entityの種類とidの一意の組み合わせを渡します。そして、そのEntityの特定のフィールドをチェックし、期待通りの値を持っていることを表明します。これはstoreに追加した最初の Gravatar Entity と、ハンドラ関数が呼び出されたときに追加される 2 つの Gravatar Entity の両方に対して行っているのです。 -- 最後に、`clearStore()`を使ってストアを掃除し、次のテストが新鮮で空のストア・オブジェクトで始められるようにしています。テストブロックは必要に応じていくつでも定義できます。 +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. 
We can define as many test blocks as we want. これで最初のテストが完成しました! 👏 テストを実行するには、サブグラフのルートフォルダで以下を実行する必要があります: -`gravityのテスト` +`graph test Gravity` すべてがうまくいくと、以下のメッセージが表示されます: -![「すべてのテストに合格しました!」というマッチ棒](/img/matchstick-tests-passed.png) +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) ## 一般的なテストシナリオ @@ -754,9 +754,9 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri ### Mocking IPFS files (from matchstick 0.4.1) -`mockIpfsFile(hash, filePath)`関数を使用することにより、IPFSファイルのモックを作成することができます。最初の引数はIPFSファイルのハッシュ/パス、2番目の引数はローカルファイルへのパスです。 +Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. -注意: `ipfs.map/ipfs.mapJSON` をテストするとき、下記のテスト例の `processGravatar()` 関数のように、コールバック関数は matchstck がそれを検出するためにテストファイルからエクスポートされなければなりません。 +NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: `.test.ts` file: @@ -795,7 +795,7 @@ test('ipfs.map', () => { }) ``` -`.utils.ts` file: +`utils.ts` file: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -857,11 +857,11 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -Assert.fieldEquals()関数を実行すると、指定されたフィールドが指定された期待値と等しいかどうかをチェックします。値が**等しくない** 場合は、テストは失敗し、エラーメッセージが出力されます。それ以外の場合は、テストは正常に通過します。 +Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. ### イベントメタデータとのやりとり -ユーザーは、`newMockEvent()`関数を使用して ethereum.Eventとして返されるデフォルトのトランザクションのメタデータを使用することができます。以下の例では、イベントオブジェクトのこれらのフィールドを読み書きする方法を示しています: +Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object: ```typescript // Read @@ -878,7 +878,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); ``` -### エンティティがストアに**ない**ことをアサートする +### Asserting that an Entity is **not** in the store ユーザーは、あるエンティティがストアに存在しないことをアサートできます。この関数は、エンティティタイプと id を受け取ります。エンティティが実際にストア内にある場合、テストは関連するエラーメッセージを表示して失敗します。この機能を使った簡単な例をご紹介します: @@ -1040,7 +1040,7 @@ describe('loadInBlock', () => { ### 動的データソースのテスト -動的なデータソースのテストは、dataSource 名前空間の `context()`, `address()`, `network()` 関数の戻り値をモックすることにより行うことができます。これらの関数は現在、以下のものを返しています。`context()` - 空の実体 (DataSourceContext) を返す、 `address()` - `0x000000000000000000000000` を返す、 `network()` - `mainnet` を返す、です。`create(...)`と`createWithContext(...)`関数は何もしないようにモックされているので、テストの中で呼ばれる必要は全くないでしょう。戻り値の変更は `matchstick-as` (version 0.3.0+) の `dataSourceMock` 名前空間の関数で行うことができます。 +Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. 
The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). 以下はその例です: @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` @@ -1289,29 +1289,29 @@ test('file/ipfs dataSource creation example', () => { ## テストカバレッジ -**マッチスティック** を使用すると、サブグラフ開発者は、記述された単体テストのテスト カバレッジを計算するスクリプトを実行できます。 +Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. -テスト カバレッジ ツールは、コンパイルされたテスト `wasm` バイナリを取得して、それらを `wat` ファイルに変換します。このファイルは、`subgraph.yaml` で定義されたハンドラーが呼び出されているかどうかを簡単に検査して確認できます。 AssemblyScript と WebAssembly ではコード カバレッジ (およびテスト全体) が非常に初期段階にあるため、**Matchstick** はブランチ カバレッジをチェックできません。代わりに、特定のハンドラーが呼び出された場合、そのイベント/関数が適切にモック化されているというアサーションに依存します。 +The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. -### 前提条件 +### Prerequisites -**Matchstick** で提供されているテストカバレッジ機能を実行するには、事前に準備しておくことがいくつかあります: +To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: #### ハンドラのエクスポート -**Matchstick** がどのハンドラが実行されているかをチェックするために、それらのハンドラは **test file** からエクスポートされる必要があります。例えばこの例では、gravity.test.ts ファイルに次のハンドラがインポートされています: +In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -その関数が見えるようにする(`wat`ファイル**名前**に含める)には、次のようにエクスポートも必要です。 +In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: ```typescript export { handleNewGravatar } ``` -### 使い方 +### Usage 設定が完了したら、テストカバレッジツールを実行するために実行します: @@ -1319,7 +1319,7 @@ export { handleNewGravatar } graph test -- -c ``` -次のように、カスタムの `coverage` コマンドを `package.json` ファイルに追加することもできます。 +You could also add a custom `coverage` command to your `package.json` file, like so: ```typescript "scripts": { @@ -1391,7 +1391,7 @@ This means you have used `console.log` in your code, which is not supported by A > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -引数の不一致は、`graph-ts`と`matchstick-as`の不一致によって起こります。このような問題を解決する最善の方法は、すべてを最新のリリース版にアップデートすることです。 +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. 
## その他のリソース From 2b4c670cb4cc66e2c5ca3ff7588f038def9dcf50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:07 -0500 Subject: [PATCH 0203/1534] New translations unit-testing-framework.mdx (Korean) --- .../creating/unit-testing-framework.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ko/subgraphs/developing/creating/unit-testing-framework.mdx index 591b55feda38..2133c1d4b5c9 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick @@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` From 497993362cdbcc8ba043c98114307fc16f8a5f3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:08 -0500 Subject: [PATCH 0204/1534] New translations 
unit-testing-framework.mdx (Dutch) --- .../creating/unit-testing-framework.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/nl/subgraphs/developing/creating/unit-testing-framework.mdx index 591b55feda38..2133c1d4b5c9 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick @@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` From 98519afe316908e6429d59a758513bd1666076f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:09 -0500 Subject: [PATCH 0205/1534] New translations unit-testing-framework.mdx (Polish) --- .../creating/unit-testing-framework.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git 
a/website/src/pages/pl/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/pl/subgraphs/developing/creating/unit-testing-framework.mdx index 591b55feda38..2133c1d4b5c9 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick @@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => { #### Testing `file/ipfs` templates -Similarly to contract dynamic data sources, users can test test file datas sources and their handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers ##### Example `subgraph.yaml` From 804b3ebb19612dcfd1ef50f0af5b71e4d529fc04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:10 -0500 Subject: [PATCH 0206/1534] New translations unit-testing-framework.mdx (Portuguese) --- .../creating/unit-testing-framework.mdx | 241 +++++++++--------- 1 file changed, 120 insertions(+), 121 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/pt/subgraphs/developing/creating/unit-testing-framework.mdx index 2f12939198a4..e35f904d8508 100644 --- 
a/website/src/pages/pt/subgraphs/developing/creating/unit-testing-framework.mdx
+++ b/website/src/pages/pt/subgraphs/developing/creating/unit-testing-framework.mdx
@@ -2,7 +2,7 @@
title: Estrutura de Testes de Unidades
---

-Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs.
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs.

## Benefits of Using Matchstick

@@ -33,7 +33,7 @@ Installation command:

brew install postgresql
```

-Crie um symlink ao último libpq.5.lib _Você pode precisar criar este diretório primeiro_ `/usr/local/opt/postgresql/lib/`
+Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/`

```sh
ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib
```

@@ -61,13 +61,13 @@ ou

/node_modules/gluegun/build/index.js:13 throw up;
```

-Tenha ciência que está em uma versão mais recente do Node.js. O graph-cli não apoia mais a **v10.19.0**, que ainda é a versão padrão para novas imagens de Ubuntu no WSL. Por exemplo, se o Matchstick é confirmado como funcional no WSL com a **v18.1.0**, pode trocar para essa versão através do **nvm** ou ao atualizar o seu Node.js global. Não se esqueça de apagar o `node_modules` e executar o `npm install` novamente após atualizar o seu nodejs! Depois, garanta que tem o **libpq** instalado. Pode fazer isto ao executar
+Please make sure you're on a newer version of Node.js; graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance, Matchstick is confirmed to be working on WSL with **v18.1.0**; you can switch to it either via **nvm** or by updating your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating your Node.js! Then, make sure you have **libpq** installed; you can do that by running

```
sudo apt-get install libpq-dev
```

-E finalmente, não use o `graph test` (que usa a sua instalação global do graph-cli, e por alguma razão, parece não funcionar no WSL no momento). Em vez disto, use o `yarn test` ou o `npm run test` (que usará a instância local do graph-cli; esta funciona muito bem). Para isto, obviamente precisaria de um script `test` no seu arquivo `package.json`, que pode ser algo simples como
+And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason currently looks broken on WSL); instead, use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm).
For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as ```json { @@ -87,7 +87,7 @@ E finalmente, não use o `graph test` (que usa a sua instalação global do grap ### Using Matchstick -Para usar o **Matchstick** no seu projeto de subgraph, basta abrir um terminal, navegar à pasta raiz do seu projeto e executar `graph test [options] ` — este baixa o binário mais recente do **Matchstick**, e executa o teste especificado, ou todos os testes especificados em uma pasta de teste (ou todos os testes existentes se não for especificado nenhum flag de fontes de dados). +To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### Opções de CLI @@ -109,35 +109,35 @@ Isto só executará esse arquivo de teste específico: graph test path/to/file.test.ts ``` -**Opções:** +**Options:** ```sh --c, --coverage Executa os testes em modo de cobertura --d, --docker Executa os testes em um docker container (Nota: Favor executar da pasta raiz do subgraph) --f --force Binário: Baixa o binário novamente. Docker: Baixa o Dockerfile novamente e reconstroi a imagem do docker. --h, --help Mostra informações de uso --l, --logs Mostra no console informações sobre o sistema operacional, modelo de CPU, e URL de download (para propósitos de debugging) --r, --recompile Força os testes a serem recompilados --v, --version Escolhe a versão do binário rust que você deseja baixar/usar +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### Docker -Desde o `graph-cli 0.25.2`, o comando `graph test` apoia a execução do `matchstick` em um container docker com a flag `-d`. A implementação do docker utiliza o [bind mount](https://docs.docker.com/storage/bind-mounts/) para que não precise reconstruir a imagem do docker toda vez que o comando `graph test -d` é executado. Alternativamente, siga as instruções do repositório do [matchstick](https://github.com/LimeChain/matchstick#docker-) para executar o docker manualmente. +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. -❗ `graph test -d` força o `docker run` a ser executado com o flag `-t`. Isto deve ser removido para rodar dentro de ambientes não interativos (como o GitHub CI). +❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). 
-❗ Caso já tenha executado o `graph test` anteriormente, pode encontrar o seguinte erro durante a construção do docker: +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -Neste caso, crie um `.dockerignore` na pasta raiz e adicione `node_modules/binary-install-raw/bin` +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### Configuração -O Matchstick pode ser configurado para usar um caminho personalizado de tests, libs e manifest através do arquivo de configuração `matchstick.yaml`: +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,23 +147,23 @@ manifestPath: path/to/subgraph.yaml ### Subgraph de demonstração -Podes experimentar e experimentar com os exemplos deste guia ao clonar a repo do [Subgraph de Demonstração](https://github.com/LimeChain/demo-subgraph) +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Tutoriais de vídeo -Também pode conferir a série em vídeo sobre ["Como usar o Matchstick para escrever testes de unidade para os seus subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Estrutura de testes -_**IMPORTANTE: A estrutura de teste descrita abaixo depende da versão >=0.5.0 do `matchstick-as`**_ +_**IMPORTANT: The test structure described below depens on `matchstick-as` version >=0.5.0**_ ### describe() -`describe(name: String , () => {})` — Define um grupo de teste. +`describe(name: String , () => {})` - Defines a test group. -**_Notas:_** +**_Notes:_** -- _Describes (descrições) não são obrigatórias. Ainda pode usar o test() da maneira antiga, fora dos blocos describe()_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ Exemplo: @@ -178,7 +178,7 @@ describe("handleNewGravatar()", () => { }) ``` -Exemplo aninhado de `describe()`: +Nested `describe()` example: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -203,7 +203,7 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` — Define um caso de teste. Pode usar o test() em blocos describe() ou de maneira independente. +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. Exemplo: @@ -232,11 +232,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Executa um bloco de código antes de quaisquer dos testes no arquivo. Se o `beforeAll` for declarado dentro de um bloco `describe`, ele é executado no começo daquele bloco `describe`. +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. Exemplos: -O código dentro do `beforeAll` será executado uma vez antes de _todos_ os testes no arquivo. +Code inside `beforeAll` will execute once before _all_ tests in the file. 
```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -263,7 +263,7 @@ describe("When entity already exists", () => { }) ``` -O código antes do `beforeAll` será executado uma vez antes de todos os testes no primeiro bloco describe +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Executa um bloco de código depois de todos os testes no arquivo. Se o `afterAll` for declarado dentro de um bloco `describe`, ele será executado no final daquele bloco `describe`. +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. Exemplo: -O código dentro do `afterAll` será executado uma vez depois de _todos_ os testes no arquivo. +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -321,17 +321,17 @@ describe("handleUpdatedGravatar", () => { }) ``` -O código dentro do `afterAll` será executado uma vez depois de todos os testes no bloco describe +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -353,9 +353,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Executa um bloco de código antes de cada teste no arquivo. Se o `beforeEach` for declarado dentro de um bloco `describe`, ele será executado antes de cada teste naquele bloco `describe`. +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -Exemplos: O código dentro do `beforeEach` será executado antes de cada teste. +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... ``` -O código antes do `beforeEach` será executado antes de cada teste no describe +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Executa um bloco de código depois de cada teste no arquivo. Se o `afterEach` for declarado dentro de um bloco `describe`, será executado após cada teste naquele bloco `describe`. +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. Exemplos: -O código dentro do `afterEach` será executado após cada teste. +Code inside `afterEach` will execute after every test. 
```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -459,7 +459,7 @@ describe("handleUpdatedGravatar", () => { }) ``` -O código dentro do `afterEach` será executado após cada teste naquele describe +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -565,7 +565,7 @@ assert.dataSourceExists( ## Como Escrever um Teste de Unidade -Vamos ver como um simples teste de unidade pareceria, com os exemplos do Gravatar no [Subgraph de Demonstração](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). Suponhamos que temos a seguinte função de handler (com duas funções de helper para facilitar): @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -627,23 +627,23 @@ import { NewGravatar } from '../../generated/Gravity/Gravity' import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' test('Can call mappings with custom events', () => { - // Cria uma entidade de teste e salve-a no armazenamento como um estado inicial (opcional) + // Create a test entity and save it in the store as initial state (optional) let gravatar = new Gravatar('gravatarId0') gravatar.save() - // Cria eventos falsos + // Create mock events let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - // Chama funções de mapeamento ao passar os eventos que acabamos de criar + // Call mapping functions passing the events we just created handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - // Afirma o estado do armazenamento + // Assert the state of the store assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - // Limpa o armazenamento para começar o próximo teste do zero + // Clear the store in order to start the next test off on a clean slate clearStore() }) @@ -652,13 +652,13 @@ test('Next test', () => { }) ``` -Quanta coisa! 
Primeiro, note que estamos a impoortar coisas do `matchstick-as`, nossa biblioteca de helper do AssemblyScript (distribuída como um módulo npm). Pode encontrar o repositório [aqui](https://github.com/LimeChain/matchstick-as). `matchstick-as` nos dá alguns métodos de teste úteis e define a função `test()`, que usaremos para construir os nossos blocos de teste. O resto é bem simples — veja o que acontece: +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: - Configuramos nosso estado inicial e adicionamos uma entidade de Gravatar personalizada; -- Definimos dois eventos `NewGravatar` com os seus dados, usando a função `createNewGravatarEvent()`; -- Chamamos métodos de handlers para estes eventos — `handleNewGravatars()` e passamos a lista dos nossos eventos personalizados; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; - Garantimos o estado da loja. Como isto funciona? — Passamos uma combinação do tipo e da id da Entidade. Depois conferimos um campo específico naquela Entidade e garantimos que ela tem o valor que esperamos que tenha. Estamos a fazer isto tanto para a Entidade Gravatar inicial adicionada ao armazenamento, quanto para as duas entidades Gravatar adicionadas ao chamar a função de handler; -- E por último — limpamos o armazenamento com `clearStore()`, para que o nosso próximo teste comece com um objeto de armazenamento novo em folha. Podemos definir quantos blocos de teste quisermos. +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. Prontinho — criamos o nosso primeiro teste! 👏 @@ -668,7 +668,7 @@ Para executar os nossos testes, basta apenas executar o seguinte na pasta raiz d E se tudo der certo, deve receber a seguinte resposta: -![Matchstick diz "All tests passed!" (Todos os testes passados!)](/img/matchstick-tests-passed.png) +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) ## Cenários de teste comuns @@ -754,18 +754,18 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri ### Como simular arquivos IPFS (do matchstick 0.4.1) -Os utilizadores podem simular arquivos IPFS com a função `mockIpfsFile(hash, filePath)`. A função aceita dois argumentos: o primeiro é o hash/caminho do arquivo IPFS, e o segundo é o caminho a um arquivo local. +Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. 
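Before the full `ipfs.cat`/`ipfs.map` test below, here is a minimal sketch of the call on its own (the hash and the local fixture path are illustrative):

```typescript
import { mockIpfsFile } from 'matchstick-as/assembly/index'

// Any ipfs.cat()/ipfs.map() call made with this hash will now read the local file instead
mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json')
```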
-NOTA: Ao testar o `ipfs.map/ipfs.mapJSON`, a função de callback deve ser exportada do arquivo de teste para que o matchstick o detete, como a função `processGravatar()` no exemplo de teste abaixo: +NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: -Arquivo `.test.ts`: +`.test.ts` file: ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' import { ipfs } from '@graphprotocol/graph-ts' import { gravatarFromIpfs } from './utils' -// Exporta o callback do ipfs.map() para que o matchstick o detete +// Export ipfs.map() callback in order for matchstck to detect it export { processGravatar } from './utils' test('ipfs.cat', () => { @@ -795,7 +795,7 @@ test('ipfs.map', () => { }) ``` -Arquivo `utils.ts`: +`utils.ts` file: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -857,11 +857,11 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -A função assert.fieldEquals() conferirá a igualdade do campo dado contra o valor dado esperado. O teste acabará em erro, com mensagem correspondente, caso os valores **NÃO** sejam iguais. Caso contrário, o teste terá êxito. +Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. ### Como interagir com metadados de Eventos -Os utilizadores podem usar metadados-padrão de transações, que podem ser retornados como um ethereum.Event com a função `newMockEvent()`. O seguinte exemplo mostra como pode ler/escrever a estes campos no objeto de Evento: +Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object: ```typescript // Leitura @@ -878,7 +878,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); ``` -### Como afirmar que uma Entidade **não** está no armazenamento +### Asserting that an Entity is **not** in the store Os utilizadores podem afirmar que uma entidade não existe no armazenamento. A função toma um tipo e uma id de entidade. Caso a entidade esteja, de facto, na loja, o teste acabará em erro, com uma mensagem de erro relevante. Veja um exemplo rápido de como usar esta funcionalidade: @@ -896,7 +896,7 @@ import { logStore } from 'matchstick-as/assembly/store' logStore() ``` -Desde a versão 0.6.0, o `logStore` não imprime mais campos derivados; em vez disto, utilizadores podem usar a nova função `logEntity`. O `logEntity` pode ser usado para imprimir qualquer entidade, não só as que têm campos derivados. O `logEntity` pega o tipo e a ID da entidade e um flag `showRelated` para indicar se utilizadores querem imprimir as entidades derivadas relacionadas. +As of version 0.6.0, `logStore` no longer prints derived fields, instead users can use the new `logEntity` function. Of course `logEntity` can be used to print any entity, not just ones that have derived fields. `logEntity` takes the entity type, entity id and a `showRelated` flag to indicate if users want to print the related derived entities. 
``` import { logEntity } from 'matchstick-as/assembly/store' @@ -907,7 +907,7 @@ logEntity("Gravatar", 23, true) ### Falhas esperadas -Os utilizadores podem encontrar falhas esperadas, com o flag shouldFail nas funções `test()`: +Os utilizadores podem encontrar falhas esperadas, com o flag shouldFail nas funções test(): ```typescript test( @@ -919,7 +919,7 @@ test( ) ``` -Caso o teste seja marcado com `shouldFail = true` mas NÃO falhe, isto será mostrado como um erro nos logs e o bloco de teste não terá êxito. E se for marcado com `shouldFail = false` (o estado normal), o executor de teste travará. +Caso o teste seja marcado com shouldFail = true mas NÃO falhe, isto será mostrado como um erro nos logs e o bloco de teste não terá êxito. E se for marcado com shouldFail = false (o estado normal), o executor de teste travará. ### Logging @@ -960,14 +960,14 @@ Logar erros críticos interromperá a execução dos testes e causará um desast Testar campos derivados permite aos utilizadores configurar um campo numa entidade e atualizar outra automaticamente, caso ela derive um dos seus campos da primeira entidade. -Antes da versão `0.6.0`, era possível resgatar as entidades derivadas ao acessá-las como propriedades ou campos de entidade, como no seguinte exemplo: +Before version `0.6.0` it was possible to get the derived entities by accessing them as entity fields/properties, like so: ```typescript let entity = ExampleEntity.load('id') let derivedEntity = entity.derived_entity ``` -Desde a versão `0.6.0`, isto é feito com a função `loadRelated` do graph-node. As entidades derivadas podem ser acessadas como são nos handlers. +As of version `0.6.0`, this is done by using the `loadRelated` function of graph-node, the derived entities can be accessed the same way as in the handlers. ```typescript test('Derived fields example test', () => { @@ -1009,9 +1009,9 @@ test('Derived fields example test', () => { }) ``` -### Teste de `loadInBlock` +### Testing `loadInBlock` -Desde a versão `0.6.0`, é possível testar o `loadInBlock` com o `mockInBlockStore`, que permite o mocking de entidades no cache de blocos. +As of version `0.6.0`, users can test `loadInBlock` by using the `mockInBlockStore`, it allows mocking entities in the block cache. ```typescript import { afterAll, beforeAll, describe, mockInBlockStore, test } from 'matchstick-as' @@ -1040,7 +1040,7 @@ describe('loadInBlock', () => { ### Como testar fontes de dados dinâmicas -Testar fontes de dados dinâmicas pode ser feito ao falsificar o valor de retorno das funções `context()`, `address()` e `network()` do namespace do dataSource. Estas funções atualmente retornam o seguinte: `context()` — retorna uma entidade vazia (DataSourceContext), `address()` — retorna `0x0000000000000000000000000000000000000000`, `network()` — retorna `mainnet`. As funções `create(...)` e `createWithContext(...)` são falsificadas para não terem uso, para que não precisem ser chamadas nos teste. Dá para mudar os valores de retorno podem através das funções do namespace `dataSourceMock` no `matchstick-as` (versão 0.3.0+). +Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. 
The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). Exemplo abaixo: @@ -1093,49 +1093,48 @@ test('Data source simple mocking example', () => { }) ``` -Note que o `dataSourceMock.resetValues()` é chamado no final. Isto é porque os valores são lembrados quando mudados, e devem ser reconfigurados caso queira voltar aos valores padrão. +Note que o dataSourceMock.resetValues() é chamado no final. Isto é porque os valores são lembrados quando mudados, e devem ser reconfigurados caso queira voltar aos valores padrão. ### Teste de criação de fontes de dados dinâmicas -Desde a versão `0.6.0`, é possível testar se uma nova fonte de dados foi criada de um modelo. Este recurso apoia modelos do ethereum/contract e do file/ipfs. Há quatro funçôes para isto: +As of version `0.6.0`, it is possible to test if a new data source has been created from a template. This feature supports both ethereum/contract and file/ipfs templates. There are four functions for this: -- `assert.dataSourceCount(templateName, expectedCount)` pode ser usado para impor a contagem esperada de fontes de dados do modelo especificado -- `assert.dataSourceExists(templateName, address/ipfsHash)` impõe que foi criada uma fonte de dados com o identificador especificado (pode ser um endereço de contrato ou um hash de arquivo ipfs) de um modelo especificado -- `logDataSources(templateName)` imprime todas as fontes de dados do modelo especificado ao console para propósitos de debugging -- `readFile(path)` lê um arquivo JSON que representa um arquivo IPFS e retorna o conteúdo como Bytes +- `assert.dataSourceCount(templateName, expectedCount)` can be used to assert the expected count of data sources from the specified template +- `assert.dataSourceExists(templateName, address/ipfsHash)` asserts that a data source with the specified identifier (could be a contract address or IPFS file hash) from a specified template was created +- `logDataSources(templateName)` prints all data sources from the specified template to the console for debugging purposes +- `readFile(path)` reads a JSON file that represents an IPFS file and returns the content as Bytes -#### Teste de modelos `ethereum/contract` +#### Testing `ethereum/contract` templates ```typescript test('ethereum/contract dataSource creation example', () => { - // Garanta que não há dataSources criadas do modelo do GraphTokenLockWallet + // Assert there are no dataSources created from GraphTokenLockWallet template assert.dataSourceCount('GraphTokenLockWallet', 0) - // Crie uma nova fonte de dados GraphTokenLockWallet com o endereço - 0xa16081f360e3847006db660bae1c6d1b2e17ec2a + // Create a new GraphTokenLockWallet datasource with address 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A GraphTokenLockWallet.create(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2A')) - // Garanta que a dataSource foi criada + // Assert the dataSource has been created assert.dataSourceCount('GraphTokenLockWallet', 1) - // Adicione uma segunda dataSource com contexto + // Add a second dataSource with context let context = new DataSourceContext() context.set('contextVal', Value.fromI32(325)) GraphTokenLockWallet.createWithContext(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'), context) - // Garanta que agora há 2 dataSources + // Assert there are now 2 dataSources 
assert.dataSourceCount('GraphTokenLockWallet', 2) - // Garanta que uma dataSource com o endereço "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" foi criada - // Lembre-se que o tipo `Address` foi transformado em letra minúscula após decodificado, então é necessário passar o endereço em letras minúsculas ao garantir que ele existe + // Assert that a dataSource with address "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" was created + // Keep in mind that `Address` type is transformed to lower case when decoded, so you have to pass the address as all lower case when asserting if it exists assert.dataSourceExists('GraphTokenLockWallet', '0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'.toLowerCase()) logDataSources('GraphTokenLockWallet') }) ``` -##### Exemplo de resultado de `logDataSource` +##### Example `logDataSource` output ```bash 🛠 { @@ -1159,11 +1158,11 @@ test('ethereum/contract dataSource creation example', () => { } ``` -#### Teste de modelos `file/ipfs` +#### Testing `file/ipfs` templates -Assim como as fontes dinâmicas de dados de contrato, os utilizadores podem testar fontes de dados de arquivos e os seus handlers +Similarly to contract dynamic data sources, users can test test file data sources and their handlers -##### Exemplo de `subgraph.yaml` +##### Example `subgraph.yaml` ```yaml ... @@ -1184,7 +1183,7 @@ templates: file: ./abis/GraphTokenLockWallet.json ``` -##### Exemplo de `schema.graphql` +##### Example `schema.graphql` ```graphql """ @@ -1204,7 +1203,7 @@ type TokenLockMetadata @entity { } ``` -##### Exemplo de `metadata.json` +##### Example `metadata.json` ```json { @@ -1219,9 +1218,9 @@ type TokenLockMetadata @entity { ```typescript export function handleMetadata(content: Bytes): void { - // dataSource.stringParams() retorna a CID File DataSource - // haverá um mock de stringParam() no teste de handler - // para mais informações, leia https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files + // dataSource.stringParams() returns the File DataSource CID + // stringParam() will be mocked in the handler test + // for more info https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files let tokenMetadata = new TokenLockMetadata(dataSource.stringParam()) const value = json.fromBytes(content).toObject() @@ -1254,31 +1253,31 @@ import { TokenLockMetadata } from '../../generated/schema' import { GraphTokenLockMetadata } from '../../generated/templates' test('file/ipfs dataSource creation example', () => { - // Gere a CID dataSource do arquivo de caminho ipfsHash + ipfs - // Por exemplo QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json + // Generate the dataSource CID from the ipfsHash + ipfs path file + // For example QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' const CID = `${ipfshash}/example.json` - // Crie uma nova dataSource com a CID gerada + // Create a new dataSource using the generated CID GraphTokenLockMetadata.create(CID) - // Garanta que a dataSource foi criada + // Assert the dataSource has been created assert.dataSourceCount('GraphTokenLockMetadata', 1) assert.dataSourceExists('GraphTokenLockMetadata', CID) logDataSources('GraphTokenLockMetadata') - // Agora tempos que fazer um mock dos metadados da dataSource e especificamente utilizar dataSource.stringParam() - // dataSource.stringParams usa o valor do dataSource.address(), então faremos um mock do endereço com odataSourceMock do matchstick-as - // 
Primeiro reiniciaremos os valores e depois usaremos o dataSourceMock.setAddress() para configurar a CID
+ // Now we have to mock the dataSource metadata and specifically dataSource.stringParam()
+ // dataSource.stringParams actually uses the value of dataSource.address(), so we will mock the address using dataSourceMock from matchstick-as
+ // First we will reset the values and then use dataSourceMock.setAddress() to set the CID
dataSourceMock.resetValues()
dataSourceMock.setAddress(CID)

- // Agora precisamos gerar os Bytes para repassar ao handler dataSource
- // Para este caso, apresentamos uma nova função readFile que lê um json local e retorna o conteúdo como Bytes
+ // Now we need to generate the Bytes to pass to the dataSource handler
+ // For this case we introduced a new function readFile, that reads a local json and returns the content as Bytes
const content = readFile(`path/to/metadata.json`)
handleMetadata(content)

- // Agora testaremos se um TokenLockMetadata foi criado
+ // Now we will test if a TokenLockMetadata was created
const metadata = TokenLockMetadata.load(CID)

assert.bigIntEquals(metadata!.endTime, BigInt.fromI32(1))
@@ -1290,29 +1289,29 @@ test('file/ipfs dataSource creation example', () => {

## Cobertura de Testes

-Com o **Matchstick**, os programadores de subgraph podem executar um script que calcula a cobertura de teste das unidades de teste escritas.
+Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests.

-A ferramenta de cobertura de testes pega os binários de teste `wasm` compilados e os converte a arquivos `wat`, que podem então ser facilmente vistoriados para ver se os handlers definidos em `subgraph.yaml` foram chamados ou não. Como a cobertura de código (e os testes em geral) está em estado primitivo no AssemblyScript e WebAssembly, o **Matchstick** não pode procurar por coberturas de branch. Em vez disto, presumimos que se um handler foi chamado, o evento/a função correspondente já passou por testes com êxito.
+The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as a whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked.

-### Pré-requisitos
+### Prerequisites

-Para executar a funcionalidade da cobertura de teste providenciada no **Matchstick**, precisa preparar algumas coisas com antecedência:
+To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand:

#### Exportar seus handlers

-Para que o **Matchstick** confira quais handlers são executados, estes handlers devem ser exportados do **arquivo de teste** primeiro. No nosso exemplo, temos o seguinte handler a ser importado no nosso arquivo gravity.test.ts:
+In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So, for instance, in our example the gravity.test.ts file has the following handler imported:

```typescript
import { handleNewGravatar } from '../../src/gravity'
```

-Para que essa função seja visível (para ser incluída no arquivo `wat` **por nome**) também precisamos exportá-la assim:
+In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this:

```typescript
export { handleNewGravatar }
```

-### Uso
+### Usage

Assim que tudo estiver pronto, para executar a ferramenta de cobertura de testes, basta:

```
graph test -- -c
```

-Também pode adicionar um comando `coverage` personalizado ao seu arquivo `package.json`, assim:
+You could also add a custom `coverage` command to your `package.json` file, like so:

```typescript
"scripts": {
@@ -1376,23 +1375,23 @@ A saída do log inclui a duração do teste. Veja um exemplo:

## Erros comuns do compilador

-> `Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined`
+> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined

This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api)

-> `ERROR TS2554: Expected ? arguments, but got ?.`
+> ERROR TS2554: Expected ? arguments, but got ?.
>
-> `return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt);`
+> return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt);
>
-> `in ~lib/matchstick-as/assembly/defaults.ts(18,12)`
+> in ~lib/matchstick-as/assembly/defaults.ts(18,12)
>
-> `ERROR TS2554: Expected ? arguments, but got ?.`
+> ERROR TS2554: Expected ? arguments, but got ?.
>
-> `return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt);`
+> return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt);
>
-> `in ~lib/matchstick-as/assembly/defaults.ts(24,12)`
+> in ~lib/matchstick-as/assembly/defaults.ts(24,12)

-A diferença nos argumentos é causada pela diferença no `graph-ts` e no `matchstick-as`. Problemas como este são melhor resolvidos ao atualizar tudo para a versão mais recente.
+The mismatch in arguments is caused by a version mismatch between `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version.
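As a side note on the two `TS2554` errors above: they surface when `matchstick-as` builds the default block and transaction metadata used by `newMockEvent()`. A minimal sketch (assuming matching `graph-ts` and `matchstick-as` versions) of the call that exercises those defaults:

```typescript
import { newMockEvent } from 'matchstick-as/assembly/index'

// newMockEvent() fills in default ethereum.Block and ethereum.Transaction values
// via the constructors from defaults.ts shown in the errors above. If graph-ts
// and matchstick-as disagree on those constructor signatures, compilation fails
// with TS2554 before any test runs.
let mockEvent = newMockEvent()
let defaultBlockNumber = mockEvent.block.number
```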
## Outros Recursos

From d43c503798e802260e3f1fa76e1062bd035d1a8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:47:12 -0500
Subject: [PATCH 0207/1534] New translations unit-testing-framework.mdx
 (Russian)

---
 .../creating/unit-testing-framework.mdx       | 237 +++++++++---------
 1 file changed, 118 insertions(+), 119 deletions(-)

diff --git a/website/src/pages/ru/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ru/subgraphs/developing/creating/unit-testing-framework.mdx
index 404ec4b5d317..a747fd939efb 100644
--- a/website/src/pages/ru/subgraphs/developing/creating/unit-testing-framework.mdx
+++ b/website/src/pages/ru/subgraphs/developing/creating/unit-testing-framework.mdx
@@ -2,7 +2,7 @@
title: Фреймворк модульного тестирования
---

-Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs.
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs.

## Benefits of Using Matchstick

@@ -33,7 +33,7 @@ Installation command:

brew install postgresql
```

-Создайте символическую ссылку на последнюю версию libpq.5.lib _Возможно, сначала Вам потребуется создать этот каталог_ `/usr/local/opt/postgresql/lib/`
+Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/`

```sh
ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib
```

@@ -61,13 +61,13 @@ static BYTES = Symbol("Bytes") SyntaxError: Unexpected token =

/node_modules/gluegun/build/index.js:13 throw up;
```

-Пожалуйста, убедитесь, что используете более новую версию Node.js. graph-cli больше не поддерживает **v10.19.0** и по-прежнему является версией по умолчанию для новых образов Ubuntu на WSL. Например, подтверждено, что Matchstick работает на WALL с **v18.1.0**. Вы можете переключиться на него либо через **nvm**, либо, если обновите свой глобальный Node.js. Не забудьте удалить `node_modules` и повторно запустить `npm install` после обновления nodejs! Затем убедитесь, что у Вас установлена **libpq**. Это можно сделать, запустив
+Please make sure you're on a newer version of Node.js; graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance, Matchstick is confirmed to be working on WSL with **v18.1.0**; you can switch to it either via **nvm** or by updating your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating your Node.js! Then, make sure you have **libpq** installed; you can do that by running

```
sudo apt-get install libpq-dev
```

-И, наконец, не применяйте `graph test` (который использует Вашу глобальную установку graph-cli и по какой-то причине в настоящее время выглядит так, как будто он не работает в WSL). Вместо этого примените `yarn test` или `npm run test` (который будет использовать локальный экземпляр graph-cli на уровне проекта, который работает отлично).
Для этого Вам, конечно, понадобится скрипт `"test"` в файле `package.json`, который может быть довольно простым, например +And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as ```json { @@ -87,7 +87,7 @@ sudo apt-get install libpq-dev ### Using Matchstick -Чтобы использовать **Matchstick** в своём проекте subgraph, просто откройте терминал, перейдите в корневую папку своего проекта и запустите `graph test [options] ` - он загрузит последний двоичный файл **Matchstick** и запустит указанный тест или все тесты в тестовой папке (или все существующие тесты, если флаг источника данных не указан). +To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### Параметры CLI @@ -109,35 +109,35 @@ graph test gravity graph test path/to/file.test.ts ``` -**Параметры:** +**Options:** ```sh --c, --coverage Запускает тесты в режиме покрытия --d, --docker Запускает тесты в docker-контейнере (Примечание: пожалуйста, выполняйте из корневой папки субграфа) --f, --force Binary: повторно загружает двоичный файл. Docker: Повторно загружает файл Docker и перестраивает образ docker --h, --help Показывает информацию об использовании --l, --logs Выводит на консоль информацию об операционной системе, модели процессора и URL-адресе загрузки (в целях отладки) --r, --recompile Принудительно перекомпилирует тесты --v, --version Выберите версию бинарного файла rust, которую хотите загрузить/использовать +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### Docker -Из `graph-cli 0.25.2` команда `graph test` поддерживает запуск `matchstick` в контейнере docker с флагом `-d`. Реализация docker использует [bind mount](https://docs.docker.com/storage/bind-mounts /), чтобы не приходилось перестраивать образ docker каждый раз, когда выполняется команда `graph test -d`. В качестве альтернативы Вы можете следовать инструкциям из репозитория [matchstick](https://github.com/LimeChain/matchstick#docker-) для запуска docker вручную. +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. 
-❗ Команда `graph test -d` принудительно запускает `docker run` с флагом `-t`. Этот флаг необходимо удалить для запуска в неинтерактивных средах (таких, например, как GitHub CI). +❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). -❗ Если Вы ранее запускали `graph test`, Вы можете столкнуться со следующей ошибкой во время сборки docker: +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -В этом случае создайте в корневой папке `.dockerignore` и добавьте `node_modules/binary-install-raw/bin` +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### Конфигурация -Matchstick можно настроить на использование пользовательских тестов, библиотек и пути к манифесту через файл конфигурации `matchstick.yaml`: +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,23 +147,23 @@ manifestPath: path/to/subgraph.yaml ### Демонстрационный субграф -Вы можете попробовать и поиграть с примерами из этого руководства, клонировав [Демонстрационный репозиторий субграфов](https://github.com/LimeChain/demo-subgraph) +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Видеоуроки -Также Вы можете посмотреть серию видеороликов [>"Как использовать Matchstick для написания модульных тестов для Ваших субграфов"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Структура тестов -_**ВАЖНО: Описанная ниже тестовая структура зависит от версии `matchstick-as` >=0.5.0**_ +_**IMPORTANT: The test structure described below depens on `matchstick-as` version >=0.5.0**_ ### describe() -`describe(name: String , () => {})` - Определяет тестовую группу. +`describe(name: String , () => {})` - Defines a test group. -**_Примечания:_** +**_Notes:_** -- _Описания не являются обязательными. Вы по-прежнему можете использовать test() как и раньше, вне блоков describe()_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ Пример: @@ -178,7 +178,7 @@ describe("handleNewGravatar()", () => { }) ``` -Пример вложенной функции `describe()`: +Nested `describe()` example: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -203,7 +203,7 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - Определяет тестовый пример. Вы можете использовать test() внутри блоков describe() или независимо друг от друга. +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. Пример: @@ -232,11 +232,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Запускает блок кода перед любым из тестов в файле. Если `beforeAll` объявлен внутри блока `describe`, он запускается в начале этого блока `describe`. +Runs a code block before any of the tests in the file. 
If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. Примеры: -Код внутри `beforeAll` будет выполнен один раз перед _всеми_ тестами в файле. +Code inside `beforeAll` will execute once before _all_ tests in the file. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -263,7 +263,7 @@ describe("When entity already exists", () => { }) ``` -Код внутри `beforeAll` будет выполняться один раз перед всеми тестами в первом блоке описания +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Запускает блок кода после выполнения всех тестов в файле. Если `afterAll` объявлен внутри блока `describe`, он запускается в конце этого блока `describe`. +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. Пример: -Код внутри `afterAll` будет выполнен один раз после _всех_ тестов в файле. +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -321,17 +321,17 @@ describe("handleUpdatedGravatar", () => { }) ``` -Код внутри `afterAll` будет выполнен один раз после всех тестов в первом блоке описания +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -353,9 +353,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Запускает блок кода перед каждым тестом. Если `beforeEach` объявлен внутри блока `describe`, он запускается перед каждым тестом в этом блоке `describe`. +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -Примеры: Код внутри `beforeEach` будет выполняться перед каждым тестированием. +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... 
``` -Код внутри `beforeEach` будет выполняться только перед каждым тестом в описании +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,10 +392,10 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - // код, который должен обновить displayName до 1-го Gravatar + // code that should update the displayName to 1st Gravatar assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') store.remove('Gravatar', '0x0') @@ -404,7 +404,7 @@ describe('handleUpdatedGravatars', () => { test('Updates the imageUrl', () => { assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - // код, который должен изменить imageUrl на https://www.gravatar.com/avatar/0x0 + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') store.remove('Gravatar', '0x0') @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Запускает блок кода после каждого теста. Если `afterEach` объявлен внутри блока `describe`, он запускается после каждого теста в этом блоке `describe`. +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. Примеры: -Код внутри `afterEach` будет выполняться после каждого теста. +Code inside `afterEach` will execute after every test. ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,10 +441,10 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // код, который должен обновить displayName до 1-го Gravatar + // code that should update the displayName to 1st Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -452,14 +452,14 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // код, который должен изменить imageUrl на https://www.gravatar.com/avatar/0x0 + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -Код внутри `afterEach` будет выполняться после каждого теста в этом описании +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,10 +481,10 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // код, который должен обновить displayName до 1-го Gravatar + // code that should update the displayName to 1st Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -492,7 +492,7 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // код, который должен изменить imageUrl на 
https://www.gravatar.com/avatar/0x0 + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) @@ -565,7 +565,7 @@ assert.dataSourceExists( ## Напишите юнит-тест -Давайте посмотрим, как будет выглядеть простой юнит-тест, используя примеры Gravatar в [Демонстрационном субграфе](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). Предположим, у нас есть следующая функция-обработчик (наряду с двумя вспомогательными функциями, облегчающими нашу жизнь): @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -627,23 +627,23 @@ import { NewGravatar } from '../../generated/Gravity/Gravity' import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' test('Can call mappings with custom events', () => { - // Создайте тестовый объект и сохраните его в хранилище как исходное состояние (необязательно) + // Create a test entity and save it in the store as initial state (optional) let gravatar = new Gravatar('gravatarId0') gravatar.save() - // Создайте фиктивные события + // Create mock events let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - // Вызовите функции мэппинга, передающие события, которые мы только что создали + // Call mapping functions passing the events we just created handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - // Подтвердите состояние хранилища + // Assert the state of the store assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - // Очистите хранилище, чтобы начать следующий тест с чистого листа + // Clear the store in order to start the next test off on a clean slate clearStore() }) @@ -652,13 +652,13 @@ test('Next test', () => { }) ``` -Предстоит очень многое распаковать! Прежде всего, важно отметить, что мы импортируем данные из `matchstick-as`, нашей вспомогательной библиотеки AssemblyScript (распространяемой как модуль npm). Репозиторий Вы можете найти [здесь](https://github.com/LimeChain/matchstick-as). `matchstick-as` предоставляет нам полезные методы тестирования, а также определяет функцию `test()`, которую мы будем использовать для построения наших тестовых блоков. В остальном все довольно просто - вот что происходит: +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. 
The rest of it is pretty straightforward - here's what happens: - Мы настраиваем наше исходное состояние и добавляем один пользовательский объект Gravatar; -- Мы определяем два объекта события `NewGravatar` вместе с их данными, используя функцию `create New Gravatar Event()`; -- Мы вызываем методы-обработчики этих событий - `обрабатываем новые Gravatars()` и передаем список наших пользовательских событий; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; - Мы утверждаем состояние хранилища. Как это происходит? - Мы передаем уникальную комбинацию типа объекта и идентификатора. Затем мы проверяем конкретное поле в этом объекте и утверждаем, что оно имеет то значение, которое мы ожидаем от него получить. Мы делаем это как для исходного объекта Gravatar, который мы добавили в хранилище, так и для двух объектов Gravatar, которые добавляются при вызове функции-обработчика; -- И, наконец, мы очищаем хранилище с помощью `clear Store()`, чтобы наш следующий тест можно было начать с нового и пустого объекта хранилища. Мы можем определить столько тестовых блоков, сколько захотим. +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. Вот и все - мы создали наш первый тест! 👏 @@ -668,7 +668,7 @@ test('Next test', () => { И если все пойдет хорошо, Вы увидите следующее приветствие: -![Matchstick с надписью “Все тесты пройдены!”](/img/matchstick-tests-passed.png) +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) ## Распространенные сценарии тестирования @@ -754,18 +754,18 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri ### Имитация файлов IPFS (из matchstick 0.4.1) -Пользователи могут имитировать файлы IPFS с помощью функции `mockIpfsFile(hash, filePath)`. Функция принимает два аргумента, первый из которых - хэш/путь к файлу IPFS, а второй - путь к локальному файлу. +Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. 
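In isolation, a call might look like the following sketch. The hash and the fixture path here are illustrative placeholders, not files that ship with the demo project:

```typescript
import { mockIpfsFile } from 'matchstick-as/assembly/index'

// Map a (hypothetical) IPFS hash to a local JSON fixture committed
// alongside the tests; ipfs.cat()/ipfs.map() calls for that hash are
// then served from the local file instead of the network
mockIpfsFile('QmSomeHashUsedOnlyInTests', 'tests/ipfs/gravatar.json')
```

The test file below shows the same call used together with `ipfs.cat` and `ipfs.map`.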
-ПРИМЕЧАНИЕ: При тестировании `ipfs.map/ipfs.mapJSON` функция обратного вызова должна быть экспортирована из тестового файла, чтобы matchstck мог ее обнаружить, подобно функции `processGravatar()` в приведенном ниже примере теста: +NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: -Файл `.test.ts`: +`.test.ts` file: ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' import { ipfs } from '@graphprotocol/graph-ts' import { gravatarFromIpfs } from './utils' -// Экспортируйте обратный вызов ipfs.map(), чтобы matchstick мог его обнаружить +// Export ipfs.map() callback in order for matchstck to detect it export { processGravatar } from './utils' test('ipfs.cat', () => { @@ -795,7 +795,7 @@ test('ipfs.map', () => { }) ``` -Файл `utils.ts`: +`utils.ts` file: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -857,11 +857,11 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -Запуск функции assert.field Equals() проверит соответствие данного поля заданному ожидаемому значению. Тест завершится неудачей, и будет выведено сообщение об ошибке, если значения **НЕ** равны. В противном случае тест пройдет успешно. +Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. ### Взаимодействие с метаданными событий -Пользователи могут по умолчанию использовать метаданные транзакции, которые могут быть возвращены в виде ethereum.Event с помощью функции `new MockEvent()`. В следующем примере показано, как можно считывать/записывать данные в эти поля объекта Event: +Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object: ```typescript // Чтение @@ -878,7 +878,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); ``` -### Утверждение о том, что объект **отсутствует** в хранилище +### Asserting that an Entity is **not** in the store Пользователи могут утверждать, что объект отсутствует в хранилище. Функция принимает тип объекта и идентификатор. Если объект действительно находится в хранилище, тест завершится неудачей с соответствующим сообщением об ошибке. Вот краткий пример использования этой функции: @@ -896,7 +896,7 @@ import { logStore } from 'matchstick-as/assembly/store' logStore() ``` -Начиная с версии 0.6.0, `logStore` больше не выводит производные поля. Вместо этого пользователи могут использовать новую функцию `logEntity`. Конечно, `logEntity` можно использовать для вывода любого объекта, а не только тех, у которых есть производные поля. `logEntity` принимает тип объекта, идентификатор объекта и флаг `showRelated`, указывающий, хочет ли пользователь вывести связанные производные объекты. +As of version 0.6.0, `logStore` no longer prints derived fields, instead users can use the new `logEntity` function. Of course `logEntity` can be used to print any entity, not just ones that have derived fields. 
`logEntity` takes the entity type, entity id and a `showRelated` flag to indicate if users want to print the related derived entities. ``` import { logEntity } from 'matchstick-as/assembly/store' @@ -960,14 +960,14 @@ test('Blow everything up', () => { Тестирование производных полей — это функция, которая позволяет пользователям устанавливать поле для определенного объекта и автоматически обновлять другой объект, если он извлекает одно из своих полей из первого объекта. -До версии `0.6.0` можно было получить производные объекты, обратившись к ним как к полям/свойствам объектов, например: +Before version `0.6.0` it was possible to get the derived entities by accessing them as entity fields/properties, like so: ```typescript let entity = ExampleEntity.load('id') let derivedEntity = entity.derived_entity ``` -Начиная с версии `0.6.0` это делается с помощью функции `loadRelated` graph-node, к производным объектам можно получить доступ так же, как и в обработчиках. +As of version `0.6.0`, this is done by using the `loadRelated` function of graph-node, the derived entities can be accessed the same way as in the handlers. ```typescript test('Derived fields example test', () => { @@ -1009,9 +1009,9 @@ test('Derived fields example test', () => { }) ``` -### Тестирование `loadInBlock` +### Testing `loadInBlock` -Начиная с версии `0.6.0`, пользователи могут тестировать `loadInBlock` с помощью `mockInBlockStore`, он позволяет имитировать объекты в кеше блоков. +As of version `0.6.0`, users can test `loadInBlock` by using the `mockInBlockStore`, it allows mocking entities in the block cache. ```typescript import { afterAll, beforeAll, describe, mockInBlockStore, test } from 'matchstick-as' @@ -1040,7 +1040,7 @@ describe('loadInBlock', () => { ### Тестирование динамических источников данных -Тестирование динамических источников данных может быть выполнено путем имитации возвращаемого значения функций `context()`, `address()` и `network()` пространства имен dataSource. В настоящее время эти функции возвращают следующее: `context()` - возвращает пустой объект (DataSourceContext), `address()` - возвращает `0x0000000000000000000000000000000000000000` `network()` - возвращает `mainnet`. Функции `create(...)` и `createWithContext(...)` замаскированы так, что они не выполняют никаких действий, поэтому их вообще не нужно вызывать в тестах. Изменения возвращаемых значений могут быть выполнены с помощью функций пространства имен `dataSourceMock` в `matchstick-as` (версия 0.3.0+). +Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). Пример ниже: @@ -1097,44 +1097,44 @@ test('Data source simple mocking example', () => { ### Тестирование создания источника динамических данных -Начиная с версии `0.6.0`, можно проверить, был ли создан новый источник данных из шаблона. Эта функция поддерживает шаблоны ethereum/contract и file/ipfs. 
Для этого предусмотрены четыре функции: +As of version `0.6.0`, it is possible to test if a new data source has been created from a template. This feature supports both ethereum/contract and file/ipfs templates. There are four functions for this: -- `assert.dataSourceCount(templateName,expectedCount)` можно использовать для подтверждения ожидаемого количества источников данных из указанного шаблона -- `assert.dataSourceExists(templateName, адрес/ipfsHash)` подтверждает, что источник данных с указанным идентификатором (может быть адресом контракта или хешем файла IPFS) из указанного шаблона был создан -- `logDataSources(templateName)` выводит все источники данных из указанного шаблона на консоль в целях отладки -- `readFile(path)` считывает файл JSON, представляющий файл IPFS, и возвращает содержимое в виде байтов +- `assert.dataSourceCount(templateName, expectedCount)` can be used to assert the expected count of data sources from the specified template +- `assert.dataSourceExists(templateName, address/ipfsHash)` asserts that a data source with the specified identifier (could be a contract address or IPFS file hash) from a specified template was created +- `logDataSources(templateName)` prints all data sources from the specified template to the console for debugging purposes +- `readFile(path)` reads a JSON file that represents an IPFS file and returns the content as Bytes -#### Тестирование шаблонов `ethereum/contract` +#### Testing `ethereum/contract` templates ```typescript test('ethereum/contract dataSource creation example', () => { - // Подтверждаем, что не создано ни одного источника данных из шаблона GraphTokenLockWallet + // Assert there are no dataSources created from GraphTokenLockWallet template assert.dataSourceCount('GraphTokenLockWallet', 0) - // Создаем новый источник данных GraphTokenLockWallet с адресом 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A + // Create a new GraphTokenLockWallet datasource with address 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A GraphTokenLockWallet.create(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2A')) - // Подтверждаем, что источник данных создан + // Assert the dataSource has been created assert.dataSourceCount('GraphTokenLockWallet', 1) - // Добавляем второй источник данных с контекстом + // Add a second dataSource with context let context = new DataSourceContext() context.set('contextVal', Value.fromI32(325)) GraphTokenLockWallet.createWithContext(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'), context) - // Подтверждаем, что теперь есть 2 источника данных + // Assert there are now 2 dataSources assert.dataSourceCount('GraphTokenLockWallet', 2) - // Подтверждаем, что был создан источник данных с адресом "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" - // Имейте в виду, что тип `Address` преобразуется в строчные буквы при декодировании, поэтому адрес нужно передавать полностью в нижнем регистре, когда Вы проверяете его наличие. 
+ // Assert that a dataSource with address "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" was created + // Keep in mind that `Address` type is transformed to lower case when decoded, so you have to pass the address as all lower case when asserting if it exists assert.dataSourceExists('GraphTokenLockWallet', '0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'.toLowerCase()) logDataSources('GraphTokenLockWallet') }) ``` -##### Пример вывода `logDataSource` +##### Example `logDataSource` output ```bash 🛠 { @@ -1158,11 +1158,11 @@ test('ethereum/contract dataSource creation example', () => { } ``` -#### Тестирование шаблонов `file/ipfs` +#### Testing `file/ipfs` templates -Аналогично контрактным источникам динамических данных пользователи могут тестировать источники данных тестовых файлов и их обработчики +Similarly to contract dynamic data sources, users can test test file data sources and their handlers -##### Пример `subgraph.yaml` +##### Example `subgraph.yaml` ```yaml ... @@ -1183,7 +1183,7 @@ templates: file: ./abis/GraphTokenLockWallet.json ``` -##### Пример `schema.graphql` +##### Example `schema.graphql` ```graphql """ @@ -1203,7 +1203,7 @@ type TokenLockMetadata @entity { } ``` -##### Пример `metadata.json` +##### Example `metadata.json` ```json { @@ -1218,9 +1218,9 @@ type TokenLockMetadata @entity { ```typescript export function handleMetadata(content: Bytes): void { - // dataSource.stringParams() возвращает CID источника данных файла - // stringParam() будет имитироваться в тесте обработчика - // для получения дополнительной информации https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files + // dataSource.stringParams() returns the File DataSource CID + // stringParam() will be mocked in the handler test + // for more info https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files let tokenMetadata = new TokenLockMetadata(dataSource.stringParam()) const value = json.fromBytes(content).toObject() @@ -1253,32 +1253,31 @@ import { TokenLockMetadata } from '../../generated/schema' import { GraphTokenLockMetadata } from '../../generated/templates' test('file/ipfs dataSource creation example', () => { - // Сгенерируйте the dataSource CID from the ipfsHash + ipfs path file - // Например, QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json + // Generate the dataSource CID from the ipfsHash + ipfs path file + // For example QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' const CID = `${ipfshash}/example.json` - // Создайте новый источник данных, используя сгенерированный CID + // Create a new dataSource using the generated CID GraphTokenLockMetadata.create(CID) - // Подтвердите, что источник данных создан + // Assert the dataSource has been created assert.dataSourceCount('GraphTokenLockMetadata', 1) assert.dataSourceExists('GraphTokenLockMetadata', CID) logDataSources('GraphTokenLockMetadata') - // Теперь нам нужно смоделировать метаданные dataSource, в частности, - dataSource.stringParam() - // dataSource.stringParams на самом деле использует значение dataSource.address(), поэтому мы будем имитировать адрес, используя dataSourceMock из matchstick-as - // Сначала мы сбросим значения, а затем используем dataSourceMock.setAddress() для установки CID + // Now we have to mock the dataSource metadata and specifically dataSource.stringParam() + // dataSource.stringParams actually uses the value of dataSource.address(), so we will mock 
the address using dataSourceMock from matchstick-as + // First we will reset the values and then use dataSourceMock.setAddress() to set the CID dataSourceMock.resetValues() dataSourceMock.setAddress(CID) - // Теперь нам нужно сгенерировать байты для передачи обработчику dataSource - // Для этого случая мы ввели новую функцию readFile, которая считывает локальный json и возвращает содержимое в виде байтов + // Now we need to generate the Bytes to pass to the dataSource handler + // For this case we introduced a new function readFile, that reads a local json and returns the content as Bytes const content = readFile(`path/to/metadata.json`) handleMetadata(content) - // Теперь проверим, был ли создан TokenLockMetadata + // Now we will test if a TokenLockMetadata was created const metadata = TokenLockMetadata.load(CID) assert.bigIntEquals(metadata!.endTime, BigInt.fromI32(1)) @@ -1290,29 +1289,29 @@ test('file/ipfs dataSource creation example', () => { ## Тестовое покрытие -Используя **Matchstick**, разработчики субграфов могут запустить скрипт, который вычислит тестовое покрытие написанных модульных тестов. +Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. -Инструмент тестового покрытия берет скомпилированные тестовые двоичные файлы `wasm` и преобразует их в файлы `wat`, которые затем можно легко проверить, были ли вызваны обработчики, определенные в `subgraph.yaml`. Поскольку покрытие кода (и тестирование в целом) в AssemblyScript и WebAssembly находится на очень ранних стадиях, **Matchstick** не может проверить покрытие ветвей. Вместо этого мы полагаемся на утверждение, что если был вызван данный обработчик, то событие/функция для него были должным образом имитированы. +The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. -### Предварительные требования +### Prerequisites -Чтобы запустить функцию тестового покрытия, представленную в **Matchstick**, необходимо заранее подготовить несколько вещей: +To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: #### Экспортируйте свои обработчики -Для того чтобы **Matchstick** мог проверить, какие обработчики запущены, эти обработчики необходимо экспортировать из **тестового файла**. Так, например, в файле gravity.test.ts импортируется следующий обработчик: +In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. 
So for instance in our example, in our gravity.test.ts file we have the following handler being imported: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -Чтобы эта функция была видимой (чтобы она была включена в файл `wat` **под именем**), нам нужно также экспортировать ее, например, так: +In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: ```typescript export { handleNewGravatar } ``` -### Применение +### Usage После того как всё это будет настроено, чтобы запустить инструмент тестового покрытия, просто запустите: @@ -1320,7 +1319,7 @@ export { handleNewGravatar } graph test -- -c ``` -Вы также можете добавить пользовательскую команду `coverage` в свой файл `package.json`, например, так: +You could also add a custom `coverage` command to your `package.json` file, like so: ```typescript "scripts": { @@ -1378,7 +1377,7 @@ Global test coverage: 22.2% (2/9 handlers). > Критично: Не удалось создать WasmInstance из допустимого модуля с контекстом: неизвестный импорт: wasi_snapshot_preview1::fd_write не определен -Это означает, что Вы использовали в своем коде `console.log`, который не поддерживается AssemblyScript. Пожалуйста, рассмотрите возможность использования [API логирования](/subgraphs/developing/creating/graph-ts/api/#logging-api) +This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api) > ERROR TS2554: Expected ? arguments, but got ?. > @@ -1392,9 +1391,9 @@ Global test coverage: 22.2% (2/9 handlers). > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -Несовпадение в аргументах вызвано несоответствием в `graph-ts` и `matchstick-as`. Лучший способ устранить проблемы, подобные этой, - обновить всё до последней выпущенной версии. +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. -## Дополнительные источники +## Дополнительные ресурсы For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). From 9a38891c92085ae4a563886e54cfd219aaaa5a79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:13 -0500 Subject: [PATCH 0208/1534] New translations unit-testing-framework.mdx (Swedish) --- .../creating/unit-testing-framework.mdx | 186 +++++++++--------- 1 file changed, 93 insertions(+), 93 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/sv/subgraphs/developing/creating/unit-testing-framework.mdx index 7f2c0d4330dc..49aea6a7f4da 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: Enhetsprovningsramverk --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. 
## Benefits of Using Matchstick @@ -33,7 +33,7 @@ Installation command: brew install postgresql ``` -Skapa en symbolisk länk till den senaste libpq.5.lib._ Du kanske behöver skapa den här mappen först: _`/usr/local/opt/postgresql/lib/` +Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -61,13 +61,13 @@ eller /node_modules/gluegun/build/index.js:13 throw up; ``` -Se till att du använder en nyare version av Node.js eftersom graph-cli inte längre stöder **v10.19.0**, och det är fortfarande standardversionen för nya Ubuntu-bilder på WSL. Till exempel är Matchstick bekräftat fungerande på WSL med **v18.1.0**. Du kan byta till den antingen via** nvm ** eller genom att uppdatera din globala Node.js. Glöm inte att ta bort `node_modules` och köra `npm install`igen efter att du har uppdaterat Node.js! Sedan, se till att du har **libpq** installerat, du kan göra det genom att köra +Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! Then, make sure you have **libpq** installed, you can do that by running ``` sudo apt-get install libpq-dev ``` -Och till sist, använd inte `graph test` (som använder din globala installation av graph-cli och av någon anledning ser ut som om det är trasig på WSL för närvarande), istället använd `yarn test` eller `npm run test` (det kommer att använda den lokala projektbaserade instansen av graph-cli, som fungerar utmärkt). För detta behöver du självklart ha ett `"test"`-skript i din `package.json`-fil, vilket kan vara något så enkelt som +And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as ```json { @@ -87,7 +87,7 @@ Och till sist, använd inte `graph test` (som använder din globala installation ### Using Matchstick -För att använda **Matchstick** i ditt subgrafprojekt öppnar du bara en terminal, navigerar till rotmappen för ditt projekt och kör helt enkelt `graftest [options] ` - den laddar ner den senaste **Matchstick**-binären och kör det angivna testet eller alla tester i en testmapp (eller alla befintliga tester om ingen datakällasflagga är angiven). +To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). 
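For instance, the two most common invocations might look like this sketch, where `gravity` is just a placeholder datasource name taken from the demo project (the next section lists the full flag reference):

```sh
# Run every existing test in the tests folder
graph test

# Run only the tests for a single named datasource
graph test gravity
```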
### CLI alternativ @@ -109,35 +109,35 @@ Då körs endast den specifika testfilen: graph test path/to/file.test.ts ``` -**Alternativ:** +**Options:** ```sh --c, --coverage Kör testerna i täckningsläge --d, --docker Kör testerna i en docker-container (Observera: Kör från rotmappen för subgraph) --f, --force Binär: Hämtar om binären. Docker: Hämtar om Dockerfilen och bygger om dockerbilden. --h, --help Visar användningsinformation --l, --logs Loggar till konsolen information om OS, CPU-modell och nedladdnings-URL (för felsökningssyften) --r, --recompile Tvingar testerna att kompileras om --v, --version Välj versionen av den rust binära som du vill att den ska hämtas/användas +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### Docker -Från `graph-cli 0.25.2` stöder kommandot `graph test` att köra `matchstick` i en Docker-behållare med flaggan `-d`. Docker-implementeringen använder [bind mount](https://docs.docker.com/storage/bind-mounts/) så att den inte behöver bygga om dockerbilden varje gång kommandot `graph test -d` körs. Alternativt kan du följa instruktionerna från [matchstick](https://github.com/LimeChain/matchstick#docker-) repository för att köra Docker manuellt. +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. ❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). -❗ Om du tidigare har kört `graph test` kan du stöta på följande fel under docker build: +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -I det här fallet skapar du en `.dockerignore` i rotmappen och lägger till `node_modules/binary-install-raw/bin`. 
+In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### Konfiguration -Matchstick kan konfigureras att använda en anpassad sökväg för tester, libs och manifest via konfigurationsfilen `matchstick.yaml`: +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,11 +147,11 @@ manifestPath: path/to/subgraph.yaml ### Demo undergraf -Du kan prova och leka med exemplen från den här guiden genom att klona [Demo Subgraph-repot](https://github.com/LimeChain/demo-subgraph) +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Handledning för video -Du kan också kolla på videoserien om ["Hur man använder Matchstick för att skriva enhetstester för dina subgraph"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -159,11 +159,11 @@ _**IMPORTANT: The test structure described below depens on `matchstick-as` versi ### describe() -`describe(name: String , () => {})` - Definierar en testgrupp. +`describe(name: String , () => {})` - Defines a test group. -**_Noteringar:_** +**_Notes:_** -- _Describes är inte obligatoriska. Du kan fortfarande använda test() på det gamla sättet, utanför describe() blocken_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ Exempel: @@ -178,7 +178,7 @@ describe("handleNewGravatar()", () => { }) ``` -Nästat `describe()` exempel: +Nested `describe()` example: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -203,7 +203,7 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - Definierar ett testfall. Du kan använda test() inuti describe()-block eller fristående. +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. Exempel: @@ -232,11 +232,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Kör en kodblock före något av testen i filen. Om `beforeAll` deklareras inuti en `describe`-block körs den i början av det `describe`-blocket. +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. Exempel: -Kod inuti `beforeAll` kommer att utföras en gång före _alla_ tester i filen. +Code inside `beforeAll` will execute once before _all_ tests in the file. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -250,20 +250,20 @@ beforeAll(() => { ... }) -describe("När enheten inte existerar", () => { - test("det bör skapa en ny Gravatar med id 0x1", () => { +describe("When the entity does not exist", () => { + test("it should create a new Gravatar with id 0x1", () => { ... }) }) -describe("När enheten redan existerar", () => { - test("det bör uppdatera Gravatar med id 0x0", () => { +describe("When entity already exists", () => { + test("it should update the Gravatar with id 0x0", () => { ... 
}) }) ``` -Kod inuti `beforeAll` kommer att exekveras en gång före alla tester i det första beskrivningsblocket +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -278,11 +278,11 @@ describe("handleUpdatedGravatar()", () => { ... }) - test("uppdaterar Gravatar med id 0x0", () => { + test("updates Gravatar with id 0x0", () => { ... }) - test("skapar ny Gravatar med id 0x1", () => { + test("creates new Gravatar with id 0x1", () => { ... }) }) @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Kör en kodblock efter alla test i filen. Om `afterAll` deklareras inuti en `describe`-block körs den i slutet av det `describe`-blocket. +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. Exempel: -Kod inuti `afterAll` kommer att utföras en gång efter _alla_ tester i filen. +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -309,41 +309,41 @@ afterAll(() => { }) describe("handleNewGravatar, () => { - test("skapar Gravatar med id 0x0", () => { + test("creates Gravatar with id 0x0", () => { ... }) }) describe("handleUpdatedGravatar", () => { - test("uppdaterar Gravatar med id 0x0", () => { + test("updates Gravatar with id 0x0", () => { ... }) }) ``` -Kod inuti `afterAll` kommer att exekveras en gång efter alla tester i det första beskrivna blocket +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) - test("Det skapar en ny enhet med id 0x0", () => { + test("It creates a new entity with Id 0x0", () => { ... }) - test("Det skapar en ny enhet med id 0x1", () => { + test("It creates a new entity with Id 0x1", () => { ... }) }) describe("handleUpdatedGravatar", () => { - test("uppdaterar Gravatar med id 0x0", () => { + test("updates Gravatar with id 0x0", () => { ... }) }) @@ -353,24 +353,24 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Kör en kodblock före varje test. Om `beforeEach` deklareras inuti en `describe`-block körs den före varje test i det `describe`-blocket. +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -Exempel: Koden inuti `beforeEach` kommer att utföras före varje test. +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" import { handleNewGravatars } from "./utils" beforeEach(() => { - clearStore() // <-- rensa butiken före varje test i filen + clearStore() // <-- clear the store before each test in the file }) describe("handleNewGravatars, () => { - test("Ett test som kräver en ren butik", () => { + test("A test that requires a clean store", () => { ... }) - test("Andra som kräver en ren butik", () => { + test("Second that requires a clean store", () => { ... }) }) @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... 
``` -Kod inuti `beforeEach` kommer att exekveras endast före varje test i den som beskriver +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -387,15 +387,15 @@ import { handleUpdatedGravatar, handleNewGravatar } from '../../src/gravity' describe('handleUpdatedGravatars', () => { beforeEach(() => { let gravatar = new Gravatar('0x0') - gravatar.displayName = 'Första Gravatar' + gravatar.displayName = 'First Gravatar' gravatar.imageUrl = '' gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - // kod som ska uppdatera displayName till 1st Gravatar + // code that should update the displayName to 1st Gravatar assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') store.remove('Gravatar', '0x0') @@ -404,7 +404,7 @@ describe('handleUpdatedGravatars', () => { test('Updates the imageUrl', () => { assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - // kod som ska ändra imageUrl till https://www.gravatar.com/avatar/0x0 + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') store.remove('Gravatar', '0x0') @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Kör en kodblock efter varje test. Om `afterEach` deklareras inuti en `describe`-block körs den efter varje test i det `describe`-blocket. +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. Exempel: -Kod inuti `afterEach` kommer att utföras efter varje test. +Code inside `afterEach` will execute after every test. 
```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,10 +441,10 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // kod som ska uppdatera displayName till 1st Gravatar + // code that should update the displayName to 1st Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -452,14 +452,14 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // kod som ska ändra imageUrl till https://www.gravatar.com/avatar/0x0 + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -Kod i `afterEach` kommer att exekveras efter varje test i den beskrivningen +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,10 +481,10 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // kod som ska uppdatera displayName till 1st Gravatar + // code that should update the displayName to 1st Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -492,7 +492,7 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // kod som ska ändra imageUrl till https://www.gravatar.com/avatar/0x0 + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) @@ -565,7 +565,7 @@ assert.dataSourceExists( ## Skriv en enhetstest -Låt oss se hur ett enkelt enhetstest skulle se ut med hjälp av Gravatar-exemplen i [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). 
Antag att vi har följande hanteringsfunktion (tillsammans med två hjälpfunktioner för att göra vårt liv enklare): @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -627,23 +627,23 @@ import { NewGravatar } from '../../generated/Gravity/Gravity' import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' test('Can call mappings with custom events', () => { - // Skapa en testenhet och spara den i arkivet som initialtillstånd (valfritt) + // Create a test entity and save it in the store as initial state (optional) let gravatar = new Gravatar('gravatarId0') gravatar.save() - // Skapa låtsashändelser + // Create mock events let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - // Anropa mappningsfunktioner som skickar händelserna vi just skapade + // Call mapping functions passing the events we just created handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - // Bekräfta butikens tillstånd + // Assert the state of the store assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - // Rensa lagret för att starta nästa test med en ny start + // Clear the store in order to start the next test off on a clean slate clearStore() }) @@ -652,13 +652,13 @@ test('Next test', () => { }) ``` -Det är mycket att ta in! Först och främst är det viktigt att notera att vi importerar saker från `matchstick-as`, vår AssemblyScript hjälpbibliotek (distribuerat som ett npm-paket). Du kan hitta lagringsplatsen [här](https://github.com/LimeChain/matchstick-as). `matchstick-as` förser oss med användbara testmetoder och definierar också funktionen `test()` som vi kommer att använda för att bygga våra testblock. Resten är ganska självförklarande - här är vad som händer: +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: - Vi ställer in vår inledande status och lägger till en anpassad Gravatar-entitet; -- Vi definierar två `NewGravatar` händelseobjekt tillsammans med deras data, med hjälp av funktionen `createNewGravatarEvent()`. -- Vi kallar på våra hanteringsmetoder för dessa händelser - `handleNewGravatars()` och skickar in listan med våra anpassade händelser; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; - Vi försäkrar oss om statusen för lagringen. Hur fungerar det? 
- Vi skickar en unik kombination av entitetstyp och id. Sedan kontrollerar vi ett specifikt fält på den entiteten och försäkrar oss om att det har det värde vi förväntar oss. Vi gör detta både för den ursprungliga Gravatar-entiteten vi lade till i lagringen och de två Gravatar-entiteterna som läggs till när hanteringsfunktionen anropas; -- Och sist men inte minst - vi rensar lagringen med hjälp av `clearStore()` så att vårt nästa test kan börja med en fräsch och tom lagringsobjekt. Vi kan definiera så många testblock som vi vill. +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. Så där har vi skapat vårt första test! 👏 @@ -668,7 +668,7 @@ För att köra våra tester behöver du helt enkelt köra följande i din subgra Och om allt går bra bör du hälsas av följande: -![Matchstick säger Alla tester har passerat](/img/matchstick-tests-passed.png) +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) ## Vanliga testscenarier @@ -754,18 +754,18 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri ### Simulering av IPFS-filer (från matchstick 0.4.1) -Användare kan simulera IPFS-filer genom att använda funktionen `mockIpfsFile(hash, filePath)`. Funktionen accepterar två argument, det första är IPFS-filens hash/sökväg och det andra är sökvägen till en lokal fil. +Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. -OBS: När du testar `ipfs.map/ipfs.mapJSON` måste callback-funktionen exporteras från testfilen för att matchstck ska upptäcka den, liknande `processGravatar()`-funktionen i testexemplet nedan: +NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: -`.test.ts` fil: +`.test.ts` file: ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' import { ipfs } from '@graphprotocol/graph-ts' import { gravatarFromIpfs } from './utils' -// Exportera ipfs.map() callback så att matchstck kan upptäcka den +// Export ipfs.map() callback in order for matchstck to detect it export { processGravatar } from './utils' test('ipfs.cat', () => { @@ -795,7 +795,7 @@ test('ipfs.map', () => { }) ``` -`utils.ts` fil: +`utils.ts` file: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -857,11 +857,11 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -Körning av funktionen assert.fieldEquals() kommer att kontrollera om det angivna fältet är lika med det förväntade värdet. Testet kommer att misslyckas och ett felmeddelande kommer att visas om värdena **INTE** är lika. Annars kommer testet att passera framgångsrikt. +Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. ### Interagera med händelsemetadata -Användare kan använda standardtransaktionsmetadata, som kan returneras som en ethereum.Event genom att använda funktionen `newMockEvent()`. 
### Interagera med händelsemetadata

-Användare kan använda standardtransaktionsmetadata, som kan returneras som en ethereum.Event genom att använda funktionen `newMockEvent()`. Följande exempel visar hur du kan läsa/skriva till de fälten på Event-objektet:
+Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object:

```typescript
// Läs
let logType = newGravatarEvent.logType

// Skriv
let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A'
newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)

@@ -878,7 +878,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)

assert.equals(ethereum.Value.fromString("hello"), ethereum.Value.fromString("hello"));
```

-### Påstå att en entitet **inte** finns i butiken
+### Asserting that an Entity is **not** in the store

Användare kan hävda att en entitet inte finns i butiken. Funktionen tar en entitetstyp och ett id. Om entiteten faktiskt finns i butiken kommer testet att misslyckas med ett relevant felmeddelande. Här är ett snabbt exempel på hur du använder den här funktionen:

@@ -1040,7 +1040,7 @@ describe('loadInBlock', () => {

### Testning av dynamiska datakällor

-Testning av dynamiska datakällor kan göras genom att moka returvärdena för funktionerna `context()`, `address()` och `network()` i dataSource-namespace. Dessa funktioner returnerar för närvarande följande: `context()` - returnerar en tom entitet (DataSourceContext), `address()` - returnerar `0x0000000000000000000000000000000000000000`, `network()` - returnerar `mainnet`. Funktionerna `create(...)` och `createWithContext(...)` mokas för att inte göra något, så de behöver inte anropas i testerna alls. Ändringar av returvärden kan göras genom funktionerna i namespace `dataSourceMock` i `matchstick-as` (version 0.3.0+).
+Testing dynamic data sources can be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+).

Exempel nedan:

@@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => {

#### Testing `file/ipfs` templates

-Similarly to contract dynamic data sources, users can test test file datas sources and their handlers
+Similarly to contract dynamic data sources, users can test file data sources and their handlers

##### Example `subgraph.yaml`

@@ -1289,29 +1289,29 @@ test('file/ipfs dataSource creation example', () => {

## Testtäckning

-Med **Matchstick** kan subgraph-utvecklare köra ett skript som beräknar täckningen av de skrivna enhetstesterna.
+Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests.

-Verktyget för testtäckning tar de kompilerade test `wasm` binärerna och omvandlar dem till `wat`filer, som sedan enkelt kan inspekteras för att se om hanterarna som är definierade i `subgraph.yaml` har blivit kallade eller inte. Eftersom kodtäckning (och tester som helhet) är i mycket tidiga stadier i AssemblyScript och WebAssembly kan **Matchstick** inte kontrollera grentäckning. Istället förlitar vi oss på påståendet att om en given hanterare har blivit kallad, har händelsen/funktionen för den hanteraren blivit korrekt mockad. 
+The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as a whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked.

-### Förutsättningar
+### Prerequisites

-För att köra testtäckningsfunktionaliteten som tillhandahålls i **Matchstick** måste du förbereda några saker i förväg:
+To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand:

#### Exportera dina hanterare

-För att **Matchstick** ska kunna kontrollera vilka hanterare som körs måste dessa hanterare exporteras från **testfilen**. Till exempel i vårt exempel, i vår fil gravity.test.ts, har vi följande hanterare som importeras:
+In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported:

```typescript
import { handleNewGravatar } from '../../src/gravity'
```

-För att denna funktion skall vara synlig (för att den skall ingå i `wat`-filen **med namn**) måste vi också exportera den, så här:
+In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this:

```typescript
export { handleNewGravatar }
```

-### Användning
+### Usage

När allt är klart kör du bara testtäckningsverktyget:

@@ -1319,7 +1319,7 @@ graph test -- -c

-Du kan också lägga till ett anpassat `coverage`-kommando i din `package.json`-fil, så här:
+You could also add a custom `coverage` command to your `package.json` file, like so:

```typescript
"scripts": {
@@ -1391,7 +1391,7 @@ This means you have used `console.log` in your code, which is not supported by A

> > in ~lib/matchstick-as/assembly/defaults.ts(24,12)

-Motsägelsen i argumenten beror på en motsägelse i `graph-ts` och `matchstick-as`. Det bästa sättet att åtgärda problem som detta är att uppdatera allt till den senaste utgivna versionen.
+The mismatch in arguments is caused by a mismatch between `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version.

## Ytterligare resurser

From 846f5fbac76e8c2fe32684882a52c2107d7f02e8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:47:14 -0500
Subject: [PATCH 0209/1534] New translations unit-testing-framework.mdx
 (Turkish)

---
 .../creating/unit-testing-framework.mdx       | 285 +++++++++---------
 1 file changed, 142 insertions(+), 143 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/tr/subgraphs/developing/creating/unit-testing-framework.mdx
index 4a594d233599..fe203de9b520 100644
--- a/website/src/pages/tr/subgraphs/developing/creating/unit-testing-framework.mdx
+++ b/website/src/pages/tr/subgraphs/developing/creating/unit-testing-framework.mdx
@@ -2,7 +2,7 @@
 title: Birim Testi Framework'ü
 ---
 
-Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). 
Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs.
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs.

## Benefits of Using Matchstick

@@ -33,7 +33,7 @@ Installation command:
brew install postgresql
```

-En son libpq.5.lib'e bir sembolik bağ oluşturun. _Bu dizini önce oluşturmanız gerekebilir_ `/usr/local/opt/postgresql/lib/`
+Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/`

```sh
ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib
```

@@ -61,13 +61,13 @@ yada
/node_modules/gluegun/build/index.js:13 throw up;
```

-Lütfen daha yeni bir Node.js sürümünde olduğunuzdan emin olun. graph-cli artık **v10.19.0**'ı desteklemiyor ve bu sürüm, hala WSL'deki yeni Ubuntu görüntülerinin varsayılan sürümüdür. Örneğin, Matchstick'in **v18.1.0** ile WSL'de çalıştığı doğrulandı, **nvm** aracılığıyla veya global Node.js'inizi güncelleyerek buna geçebilirsiniz. Nodejs'nizi güncelledikten sonra `node_modules`'ı silmeyi ve `node_modules`'u tekrar çalıştırmayı unutmayın! Daha sonra, **libpq** yüklü olduğundan emin olun, bunu çalıştırarak yapabilirsiniz
+Please make sure you're on a newer version of Node.js; graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating your Node.js! Then, make sure you have **libpq** installed, you can do that by running

```
sudo apt-get install libpq-dev
```

-Son olarak, `graph test`'i kullanmayın (global graph-cli yüklemenizi kullanmaktadır ve bazı nedenlerden dolayı şu anda WSL'de bozuk gibi görünüyor), bunun yerine `yarn test` veya `npm run test` kullanın (bu, proje düzeyindeki yerel graph-cli örneğini kullanacaktır, bu da harika çalışır). Bunun için tabiki `package.json` dosyanızda bir `"test"` script'i olması gerektiğini unutmayın, bunun gibi basit bir şey olabilir
+And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as

```json
{
@@ -87,7 +87,7 @@
```

### Using Matchstick

-**Matchstick**'i subgraph proje'nizde kullanmak için sadece bir terminal açın, proje'nizin kök(root) klasörüne gidin ve basitçe `graph test [options] ` - komutunu çalıştırın - bu en son **Matchstick** ikili dosyasını indirir ve belirtilen testi veya test klasöründeki tüm testleri çalıştırır (verikaynağı bayrağı belirtilmezse mevcut tüm testler). 
+To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### CLI seçenekleri @@ -109,35 +109,35 @@ Bu sadece belirtilen test dosyasını çalıştıracaktır: graph test path/to/file.test.ts ``` -**Seçenekler:** +**Options:** ```sh --c, --coverage Testleri kapsama modunda çalıştırır --d, --docker Testleri bir docker konteynerinde çalıştırır (Not: Subgraph kök klasöründen çalıştırın) --f, --force İkili: İkilinin yeniden indirilmesini sağlar. Docker: Dockerfile'ın yeniden indirilmesi ve docker görüntüsünün yeniden oluşturulması. --h, --help Kullanım bilgilerini gösterir --l, --logs İşletim sistemi, CPU modeli ve indirme URL'si hakkında konsola günlük bilgilerini yazar (hata ayıklama amaçlıdır) --r, --recompile Testlerin yeniden derlenmesini zorlar --v, --version İndirmek/kullanmak istediğiniz rust ikilisinin sürümünü seçmenize yarar +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### Docker -`graph-cli 0.25.2`'den itibaren `graph test` `-d` bayrağı ile `matchstick`'i bir docker konteynerinde çalıştırmayı desteklemektedir. Docker uygulaması, [bind mount](https://docs.docker.com/storage/bind-mounts/) kullandığından, `graph test -d` komutu her çalıştırıldığında docker görüntüsünü yeniden oluşturmak zorunda değildir. Alternatif olarak, [matchstick](https://github.com/LimeChain/matchstick#docker-) deposundan docker'ı manuel olarak çalıştırmak için talimatları izleyebilirsiniz. +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. -❗ `graph test -d`, `docker run`'un `-t` bayrağıyla çalışmasını zorlar. Bu, etkileşimli olmayan ortamlar (örneğin, GitHub CI) içinde çalıştırmak için kaldırılmalıdır. +❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). 
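For example, a hypothetical CI job (this exact workflow is not part of the Matchstick docs; adapt it to your own pipeline) can avoid the interactive `-t` requirement entirely by running the tests without the `-d` flag:

```yaml
# Hypothetical CI sketch - adjust the Node version and package manager to your project
name: matchstick-tests
on: [push]
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 18
      - run: npm install
      # Runs the project-level graph-cli via the "test" script, with no docker/-t flags
      - run: npm run test
```

Going through the project-level `test` script also keeps CI on the same graph-cli version as local development.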
-❗ Daha önce `graph test` çalıştırdıysanız, docker build sırasında aşağıdaki hatayla karşılaşabilirsiniz: +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -Bu durumda, kök klasör içinde bir `.dockerignore` dosyası oluşturun ve `node_modules/binary-install-raw/bin`'i ekleyin +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### Yapılandırma -Matchstick, `matchstick.yaml` yapılandırma dosyası aracılığıyla özel testler, kütüphaneler ve manifest yolunu kullanacak şekilde yapılandırılabilir: +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,23 +147,23 @@ manifestPath: path/to/subgraph.yaml ### Demo subgraph -[Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph)sunu klonlayarak bu kılavuzdaki örnekleri deneyebilir ve ve istediğinizi yapabilirsiniz +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### Öğretici videolar -Ayrıca, "["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h)" konulu video serisine göz atabilirsiniz +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Test yapısı -_**ÖNEMLİ: Aşağıda açıklanan test yapısı `matchstick-as` sürümünün >=0.5.0 olmasını gerektirir**_ +_**IMPORTANT: The test structure described below depens on `matchstick-as` version >=0.5.0**_ ### describe() -`describe(name: String , () => {})` - Bir test grubunu tanımlar. +`describe(name: String , () => {})` - Defines a test group. -**_Notlar:_** +**_Notes:_** -- _Açıklamalar zorunlu değildir. Hala test() fonksiyonunu describe() bloklarının dışında kullanabilirsiniz_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ Örnek: @@ -178,7 +178,7 @@ describe("handleNewGravatar()", () => { }) ``` -İç içe `describe()` örneği: +Nested `describe()` example: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -203,7 +203,7 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - Bir test durumu tanımlar. test() fonksiyonunu describe() blokları içinde veya bağımsız olarak kullanabilirsiniz. +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. Örnek: @@ -232,11 +232,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Dosyadaki tüm testlerden önce bir kod bloğu çalıştırır. `beforeAll`, `describe` bloklarının içinde tanımlanırsa, o `describe` bloğunun başında çalışır. +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. Örnekler: -`beforeAll` içindeki kod, dosyadaki _tüm_ testlerden önce bir kez çalıştırılacaktır. +Code inside `beforeAll` will execute once before _all_ tests in the file. 
```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -263,7 +263,7 @@ describe("When entity already exists", () => { }) ``` -`beforeAll` içindeki kod, ilk describe bloğundaki tüm testlerden önce bir kez çalıştırılacaktır +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Dosyadaki tüm testlerden sonra bir kod bloğu çalıştırır. `afterAll`, `describe` bloklarının içinde tanımlanırsa, o `describe` bloğunun sonunda çalışır. +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. Örnek: -`afterAll` içindeki kod, dosyadaki _tüm_ testlerden sonra bir kez çalıştırılacaktır. +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -321,17 +321,17 @@ describe("handleUpdatedGravatar", () => { }) ``` -`afterAll` içindeki kod, ilk describe bloğundaki tüm testlerden sonra bir kez çalıştırılacaktır +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -353,9 +353,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Her testten önce bir kod bloğu çalıştırır. `beforeEach`, `describe` bloklarının içinde tanımlanırsa, o `describe` bloğunun her testinden önce çalıştırılır. +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -Örnekler: `beforeEach` içindeki kod, her testten önce çalıştırılacaktır. +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... ``` -`beforeEach` içindeki kod, yalnızca o describe bloğundaki her testten önce çalıştırılacaktır +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,10 +392,10 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - // displayName'i 1. 
Gravatar olarak güncellemesi gereken kod + // code that should update the displayName to 1st Gravatar assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') store.remove('Gravatar', '0x0') @@ -404,7 +404,7 @@ describe('handleUpdatedGravatars', () => { test('Updates the imageUrl', () => { assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - // imageUrl'yi https://www.gravatar.com/avatar/0x0 olarak değiştirmesi gereken kod + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') store.remove('Gravatar', '0x0') @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Her testten sonra bir kod bloğu çalıştırır. `afterEach`, `describe` bloklarının içinde tanımlanırsa, o `describe` bloğunun her testinden sonra çalıştırılır. +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. Örnekler: -`afterEach` içindeki kod, her testten sonra çalıştırılacaktır. +Code inside `afterEach` will execute after every test. ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,10 +441,10 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // displayName'i 1. Gravatar olarak güncellemesi gereken kod + // code that should update the displayName to 1st Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -452,14 +452,14 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // imageUrl'yi https://www.gravatar.com/avatar/0x0 olarak değiştirmesi gereken kod + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -`afterEach` içindeki kod, yalnızca o describe bloğundaki her testten sonra çalıştırılacaktır +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,10 +481,10 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // displayName'i 1. 
Gravatar olarak güncellemesi gereken kod + // code that should update the displayName to 1st Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -492,7 +492,7 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // imageUrl'yi https://www.gravatar.com/avatar/0x0 olarak değiştirmesi gereken kod + // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) @@ -536,36 +536,36 @@ entityCount(entityType: string, expectedCount: i32) Sürüm 0.6.0 itibariyle, assert fonksiyonları özel hata mesajlarını da desteklemektedir ```typescript -assert.fieldEquals('Gravatar', '0x123', 'id', '0x123', 'Id 0x123 olmalıdır') -assert.equals(ethereum.Value.fromI32(1), ethereum.Value.fromI32(1), 'Value 1'e eşit olmalıdır') -assert.notInStore('Gravatar', '0x124', 'Gravatar store'da olmamalıdır') -assert.addressEquals(Address.zero(), Address.zero(), 'Adres sıfır olmalıdır') -assert.bytesEquals(Bytes.fromUTF8('0x123'), Bytes.fromUTF8('0x123'), 'Byte değerleri eşit olmalıdır') -assert.i32Equals(2, 2, 'I32, 2'ye eşit olmalıdır') -assert.bigIntEquals(BigInt.fromI32(1), BigInt.fromI32(1), 'BigInt 1'e eşit olmalıdır') -assert.booleanEquals(true, true, 'Boolean true olmalıdır') -assert.stringEquals('1', '1', 'String 1'e eşit olmalıdır') -assert.arrayEquals([ethereum.Value.fromI32(1)], [ethereum.Value.fromI32(1)], 'Array'ler eşit olmalıdır') +assert.fieldEquals('Gravatar', '0x123', 'id', '0x123', 'Id should be 0x123') +assert.equals(ethereum.Value.fromI32(1), ethereum.Value.fromI32(1), 'Value should equal 1') +assert.notInStore('Gravatar', '0x124', 'Gravatar should not be in store') +assert.addressEquals(Address.zero(), Address.zero(), 'Address should be zero') +assert.bytesEquals(Bytes.fromUTF8('0x123'), Bytes.fromUTF8('0x123'), 'Bytes should be equal') +assert.i32Equals(2, 2, 'I32 should equal 2') +assert.bigIntEquals(BigInt.fromI32(1), BigInt.fromI32(1), 'BigInt should equal 1') +assert.booleanEquals(true, true, 'Boolean should be true') +assert.stringEquals('1', '1', 'String should equal 1') +assert.arrayEquals([ethereum.Value.fromI32(1)], [ethereum.Value.fromI32(1)], 'Arrays should be equal') assert.tupleEquals( - changetype([ethereum.Value.fromI32(1)]), - changetype([ethereum.Value.fromI32(1)]), - 'Tuple'lar eşit olmalıdır', + changetype([ethereum.Value.fromI32(1)]), + changetype([ethereum.Value.fromI32(1)]), + 'Tuples should be equal', ) -assert.assertTrue(true, 'True olmalıdır') -assert.assertNull(null, 'Null olmalıdır') -assert.assertNotNull('not null', 'Null olmamalıdır') -assert.entityCount('Gravatar', 1, '2 gravatar olmalıdır') -assert.dataSourceCount('GraphTokenLockWallet', 1, 'GraphTokenLockWallet şablonunda bir veri kaynağı olmalıdır') +assert.assertTrue(true, 'Should be true') +assert.assertNull(null, 'Should be null') +assert.assertNotNull('not null', 'Should be not null') +assert.entityCount('Gravatar', 1, 'There should be 2 gravatars') +assert.dataSourceCount('GraphTokenLockWallet', 1, 'GraphTokenLockWallet template should have one data source') assert.dataSourceExists( - 'GraphTokenLockWallet', - Address.zero().toHexString(), - 'GraphTokenLockWallet sıfır adresi için bir veri kaynağına sahip olmalıdır', + 'GraphTokenLockWallet', + Address.zero().toHexString(), + 'GraphTokenLockWallet should have a data source for zero address', ) ``` ## Bir Birim Testi Yazın -[Demo 
Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts)'taki Gravatar örneklerini kullanarak nasıl basit bir birim test görüneceğini görelim. +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). Aşağıdaki işleyici fonksiyonuna sahip olduğumuzu varsayarsak (iki yardımcı işlevle birlikte): @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -627,23 +627,23 @@ import { NewGravatar } from '../../generated/Gravity/Gravity' import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' test('Can call mappings with custom events', () => { - // Bir test varlığı oluşturun ve bunu depoya başlangıç durumu olarak kaydedin (isteğe bağlı) + // Create a test entity and save it in the store as initial state (optional) let gravatar = new Gravatar('gravatarId0') gravatar.save() - // Mock etkinlikleri oluşturun + // Create mock events let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - // Az önce oluşturduğumuz olayları geçiren çağrı eşleştirme fonksiyonları + // Call mapping functions passing the events we just created handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - // Deponun durumunu doğrulayın + // Assert the state of the store assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - // Bir sonraki testi temiz bir sayfa üzerinde başlatmak için depoyu boşaltın + // Clear the store in order to start the next test off on a clean slate clearStore() }) @@ -652,13 +652,13 @@ test('Next test', () => { }) ``` -Analiz edilecek çok fazla şey var! Öncelikle fark etmemiz gereken önemli şey AssemblyScript yardımcı kütüphanemiz (npm modülü olarak dağıtılır) `matchstick-as`'den işleri içe aktardığımız. Repositoriyi [burada](https://github.com/LimeChain/matchstick-as) bulabilirsiniz. `matchstick-as` bize yararlı test yöntemleri sağlar ve ayrıca test blokları oluşturmak için kullanacağımız `test()` işlevini tanımlar. Geri kalanı oldukça açık - şöyle olur: +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. 
The rest of it is pretty straightforward - here's what happens:

- İlk durumumuzu ayarlıyor ve bir özel Gravatar varlığı ekliyoruz;
-- `createNewGravatarEvent()` fonksiyonunu kullanarak verileriyle birlikte iki `NewGravatar`r olay nesnesini tanımlıyoruz;
-- `handleNewGravatars()` yöntemlerimizi bu olaylar için çağırıyoruz ve özel olay listemizi geçiyoruz;
+- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function;
+- We're calling our handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events;
- Depo durumunu doğruluyoruz. Bu nasıl çalışır? - Bir varlık türü ve kimliğinin benzersiz bir kombinasyonunu geçiriyoruz. Ardından, bu varlıkta belirli bir alanı kontrol ediyoruz ve beklediğimiz değeri almasını sağlıyoruz. Hem depoya eklediğimiz ilk Gravatar Varlığı için hem de işleyici işlevi çağrıldığında eklenen iki Gravatar varlığı için bunu yapıyoruz;
-- Ve son olarak `clearStore()` kullanarak depoyu temizliyoruz, böylece bir sonraki testimiz temiz ve boş bir depo nesnesiyle başlayabilir. İstediğimiz kadar test bloğu tanımlayabiliriz.
+- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want.

İşte başardın - ilk testimizi oluşturduk! 👏

@@ -668,7 +668,7 @@ Analiz edilecek çok fazla şey var! Öncelikle fark etmemiz gereken önemli şe

Ve her şey yolunda giderse aşağıdakiyle karşılaşacaksınız:

-![Matchstick "Tüm testler geçildi!" diyor](/img/matchstick-tests-passed.png)
+![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png)

## Çok rastlanan test senaryoları

@@ -754,18 +754,18 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri

### IPFS dosyalarını mocklama (Matchstick 0.4.1'den)

-Kullanıcılar `mockIpfsFile(hash, filePath)` fonksiyonunu kullanarak IPFS dosyalarını mocklama yeteneğine sahiptirler. Fonksiyon, ilk argümanı IPFS dosya hash/yol'u ve ikinci argümanı yerel bir dosyanın yolu olmak üzere iki argüman kabul eder.
+Users can mock IPFS files by using the `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file.

-NOT: `ipfs.map/ipfs.mapJSON`'u test ederken matchstck'in bunu algılaması için geri çağrıma işlevinin test dosyasından dışa aktarılması gerekiyor, örneğin aşağıdaki test örneğindeki `processGravatar()` fonksiyonu gibi:
+NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstick to detect it, like the `processGravatar()` function in the test example below:

-`.test.ts` dosyası:
+`.test.ts` file:

```typescript
import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index'
import { ipfs } from '@graphprotocol/graph-ts'
import { gravatarFromIpfs } from './utils'

-// Matchstck'in algılaması için ipfs.map() geri çağrısını dışa aktarın
+// Export ipfs.map() callback in order for matchstick to detect it
export { processGravatar } from './utils'

test('ipfs.cat', () => {
@@ -795,7 +795,7 @@ test('ipfs.map', () => {
})
```

-`utils.ts` dosyası:
+`utils.ts` file:

```typescript
import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts"
@@ -857,11 +857,11 @@ gravatar.save()

assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0')
```

-assert.fieldEquals () fonksiyonunu çalıştırmak, verilen alanın verilen beklenen değere karşı eşitliğini kontrol edecektir. Değerler eşit **DEĞİLSE** test başarısız olacak ve bir hata mesajı verecektir. Aksi takdirde, test başarılı bir şekilde geçecektir.
+Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully.

### Olay üst verileriyle etkileşim

-Kullanıcılar `newMockEvent()` fonksiyonunu kullanarak ethereum.Event döndürebilen varsayılan işlem üst verilerini kullanabilir. Aşağıdaki örnek, Olay nesnesindeki bu alanlara nasıl okuma/yazma yapabileceğinizi gösterir:
+Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object:

```typescript
// Okuma
let logType = newGravatarEvent.logType

// Yazma
let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A'
newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)

@@ -878,7 +878,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)

assert.equals(ethereum.Value.fromString("hello"), ethereum.Value.fromString("hello"));
```

-### Bir varlığın depoda olmadığını(**not**) doğrulama
+### Asserting that an Entity is **not** in the store

Kullanıcılar, bir varlığın depoda olmadığını doğrulayabilirler. Bu fonksiyon, bir varlık türü ve bir kimlik alır. Eğer varlık gerçekten depoda ise, test ilgili bir hata mesajı vererej başarısız olacaktır. Fonksiyonun nasıl kullanılacağına dair hızlıca bir örneğe bakalım:

@@ -896,7 +896,7 @@ import { logStore } from 'matchstick-as/assembly/store'
logStore()
```

-Sürüm 0.6.0 itibarıyla, `logStore` türetilmiş alanları yazdırmaz. Bunun yerine kullanıcılar yeni `logEntity` fonksiyonunu kullanabilir. Elbette ki `logEntity`, yalnızca türetilmiş alanlara sahip olanları değil, herhangi bir varlığı yazdırmak için kullanılabilir. `logEntity`, varlık türünü, varlık kimliğini ve ilgili türetilmiş varlıkları yazdırmak isteyip istemediğinizi belirten bir `showRelated` bayrağını alır.
+As of version 0.6.0, `logStore` no longer prints derived fields, instead users can use the new `logEntity` function. Of course `logEntity` can be used to print any entity, not just ones that have derived fields. 
`logEntity` takes the entity type, entity id and a `showRelated` flag to indicate if users want to print the related derived entities. ``` import { logEntity } from 'matchstick-as/assembly/store' @@ -960,14 +960,14 @@ Kritik hataları kayıt altına almak, testlerin yürütülmesini durduracak ve Türetilmiş alanları test etme özelliği, belirli bir varlıkta bir alan belirleyip, bu varlıktan türetilmiş bir alan içeriyoren başka bir varlığın otomatik olarak güncellenmesini sağlayan bir özelliktir. -Sürüm `0.6.0` öncesinde, türetilmiş varlıklarıa varlık alanları/özellikleri olarak erişerek şu şekilde ulaşmak mümkündü: +Before version `0.6.0` it was possible to get the derived entities by accessing them as entity fields/properties, like so: ```typescript let entity = ExampleEntity.load('id') let derivedEntity = entity.derived_entity ``` -Sürüm `0.6.0` itibarıyla bu işlem, graph-node’un `loadRelated` fonksiyonu kullanılarak yapılmaktadır. Türetilmiş varlıklara, işleyicilerdekiyle aynı şekilde erişilebilir. +As of version `0.6.0`, this is done by using the `loadRelated` function of graph-node, the derived entities can be accessed the same way as in the handlers. ```typescript test('Derived fields example test', () => { @@ -1009,9 +1009,9 @@ test('Derived fields example test', () => { }) ``` -### `loadInBlock`'u Test Etme +### Testing `loadInBlock` -Sürüm `0.6.0` itibarıyla, kullanıcılar `loadInBlock` işlevini `mockInBlockStore` kullanarak test edebilir, bu sayede blok önbelleğindeki varlıkları simüle edebilirler. +As of version `0.6.0`, users can test `loadInBlock` by using the `mockInBlockStore`, it allows mocking entities in the block cache. ```typescript import { afterAll, beforeAll, describe, mockInBlockStore, test } from 'matchstick-as' @@ -1026,12 +1026,12 @@ describe('loadInBlock', () => { clearInBlockStore() }) - test('entity.loadInBlock() metodunu kullanarak mevcut bloktaki önbellek depodan varlığı alabilir', () => { + test('Can use entity.loadInBlock() to retrieve entity from cache store in the current block', () => { let retrievedGravatar = Gravatar.loadInBlock('gravatarId0') assert.stringEquals('gravatarId0', retrievedGravatar!.get('id')!.toString()) }) - test('Varlık mevcut blokta yoksa entity.loadInBlock() çağrıldığında null döndürür', () => { + test("Returns null when calling entity.loadInBlock() if an entity doesn't exist in the current block", () => { let retrievedGravatar = Gravatar.loadInBlock('IDoNotExist') assert.assertNull(retrievedGravatar) }) @@ -1040,7 +1040,7 @@ describe('loadInBlock', () => { ### Dinamik Veri Kaynaklarının Test Edilmesi -Dinamik veri kaynaklarının test edilmesi dataSource ad alanının `context()`, `address()` ve `network()` fonksiyonlarının geri dönüş değerlerinin mocklanmasıyla yapılabilir. Bu fonksiyonlar şu anda şunları döndürmektedir: `context()` - boş bir varlık döndürür (DataSourceContext), `address()` - `0x0000000000000000000000000000000000000000` döndürür, `network()` - `mainnet` döndürür. `create(...)` ve `createWithContext(...)` fonksiyonları hiçbir şey yapmamak için mocklanmıştır bu nedenle testlerde çağrılmaları gerekmez. Dönüş değerlerinde yapılacak değişiklikler `matchstick-as`'deki (version 0.3.0+)`dataSourceMock` ad alanının fonksiyonlarıyla yapılabilir. +Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. 
These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). Aşağıdaki örnekte: @@ -1097,44 +1097,44 @@ dataSourceMock.resetValues()'in en sonda çağrıldığına dikkat edin. Bunun n ### Dinamik veri kaynağı oluşturulmasının test edilmesi -Sürüm `0.6.0` itibarıyla, yeni bir veri kaynağının bir şablonu temel alarak oluşturulup oluşturulmadığını test etmek mümkündür. Bu özellik hem ethereum/sözleşme hem de dosya/ipfs şablonlarını destekler. Bunun için dört fonksiyon bulunmaktadır: +As of version `0.6.0`, it is possible to test if a new data source has been created from a template. This feature supports both ethereum/contract and file/ipfs templates. There are four functions for this: -- `assert.dataSourceCount(templateName, expectedCount)` belirli bir şablondan beklenen veri kaynağı sayısını doğrulamak için kullanılabilir -- `assert.dataSourceExists(templateName, address/ipfsHash)` belirli bir şablondan belirtilen kimliğe sahip (bu kimlik sözleşme adresi veya IPFS dosya hash’i olabilir) bir veri kaynağının oluşturulmuş olduğunu doğrular -- `logDataSources(templateName)`, belirtilen şablonda bulunan tüm veri kaynaklarını hata ayıklama amacıyla konsola yazdırır -- `readFile(path)`, bir IPFS dosyasını temsil eden JSON dosyasını okur ve içeriği Bytes olarak döndürür +- `assert.dataSourceCount(templateName, expectedCount)` can be used to assert the expected count of data sources from the specified template +- `assert.dataSourceExists(templateName, address/ipfsHash)` asserts that a data source with the specified identifier (could be a contract address or IPFS file hash) from a specified template was created +- `logDataSources(templateName)` prints all data sources from the specified template to the console for debugging purposes +- `readFile(path)` reads a JSON file that represents an IPFS file and returns the content as Bytes -#### `ethereum/contract` şablonlarının test edilmesi +#### Testing `ethereum/contract` templates ```typescript -test('ethereum/sözleşme dataSource oluşturma örneği', () => { - // GraphTokenLockWallet şablonundan oluşturulmuş veri kaynağı olmadığını doğrula +test('ethereum/contract dataSource creation example', () => { + // Assert there are no dataSources created from GraphTokenLockWallet template assert.dataSourceCount('GraphTokenLockWallet', 0) - // 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A adresiyle yeni bir GraphTokenLockWallet veri kaynağı oluştur + // Create a new GraphTokenLockWallet datasource with address 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A GraphTokenLockWallet.create(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2A')) - // datSource oluşturulduğunu doğrula + // Assert the dataSource has been created assert.dataSourceCount('GraphTokenLockWallet', 1) - // İkinci dataSource'u bağlamıyla ekle + // Add a second dataSource with context let context = new DataSourceContext() context.set('contextVal', Value.fromI32(325)) GraphTokenLockWallet.createWithContext(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'), context) - // Şimdi 2 veri kaynağı olduğunu doğrula + // Assert there are now 2 dataSources 
assert.dataSourceCount('GraphTokenLockWallet', 2) - // "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" adresine sahip bir veri kaynağının oluşturulduğunu doğrula - // `Address` türü çözümlendiğinde küçük harfe dönüştürüldüğünden, doğrulama yaparken adresi tamamen küçük harf olarak geçmelisiniz + // Assert that a dataSource with address "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" was created + // Keep in mind that `Address` type is transformed to lower case when decoded, so you have to pass the address as all lower case when asserting if it exists assert.dataSourceExists('GraphTokenLockWallet', '0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'.toLowerCase()) logDataSources('GraphTokenLockWallet') }) ``` -##### `logDataSource` çıktısı örneği +##### Example `logDataSource` output ```bash 🛠 { @@ -1158,11 +1158,11 @@ test('ethereum/sözleşme dataSource oluşturma örneği', () => { } ``` -#### `dosya/ipfs` şemalarını test etme +#### Testing `file/ipfs` templates -Akıllı sözleşme dinamik veri kaynaklarına benzer şekilde kullanıcılar, test dosyası veri kaynaklarını ve bunların işleyicilerini test edebilirler +Similarly to contract dynamic data sources, users can test test file data sources and their handlers -##### `subgraph.yaml` örneği +##### Example `subgraph.yaml` ```yaml ... @@ -1183,7 +1183,7 @@ templates: file: ./abis/GraphTokenLockWallet.json ``` -##### `schema.graphql` örneği +##### Example `schema.graphql` ```graphql """ @@ -1203,7 +1203,7 @@ type TokenLockMetadata @entity { } ``` -##### `metadata.json` örneği +##### Example `metadata.json` ```json { @@ -1218,10 +1218,9 @@ type TokenLockMetadata @entity { ```typescript export function handleMetadata(content: Bytes): void { - // dataSource.stringParams() File Veri Kaynağı CID'sini döndürür - // stringParam() işleyici testinde taklit edilecektir - // daha fazla bilgi için https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files - process - files + // dataSource.stringParams() returns the File DataSource CID + // stringParam() will be mocked in the handler test + // for more info https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files let tokenMetadata = new TokenLockMetadata(dataSource.stringParam()) const value = json.fromBytes(content).toObject() @@ -1253,32 +1252,32 @@ import { handleMetadata } from '../../src/token-lock-wallet' import { TokenLockMetadata } from '../../generated/schema' import { GraphTokenLockMetadata } from '../../generated/templates' -test('dosya/ipfs veri kaynağı oluşturma örneği', () => { - // ipfsHash + ipfs dosya yolu kullanılarak veri kaynağı CID'si oluşturulur - // Örneğin QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json +test('file/ipfs dataSource creation example', () => { + // Generate the dataSource CID from the ipfsHash + ipfs path file + // For example QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' const CID = `${ipfshash}/example.json` - // Oluşturulan CID kullanılarak yeni bir dataSource oluşturulur + // Create a new dataSource using the generated CID GraphTokenLockMetadata.create(CID) - // dataSource oluşturulduğunu doğrula + // Assert the dataSource has been created assert.dataSourceCount('GraphTokenLockMetadata', 1) assert.dataSourceExists('GraphTokenLockMetadata', CID) logDataSources('GraphTokenLockMetadata') - // Şimdi dataSource metadatasını ve spesifik olarak dataSource.stringParam()'ı taklit etmemiz gerekiyor - // 
dataSource.stringParams aslında dataSource.address() değerini kullanır, bu yüzden matchstick-as kütüphanesinden aldığımız dataSourceMock ile adresi taklit edeceğiz - // İlk olarak değerleri sıfırlayıp ardından dataSourceMock.setAddress() ile CID'yi atayacağız + // Now we have to mock the dataSource metadata and specifically dataSource.stringParam() + // dataSource.stringParams actually uses the value of dataSource.address(), so we will mock the address using dataSourceMock from matchstick-as + // First we will reset the values and then use dataSourceMock.setAddress() to set the CID dataSourceMock.resetValues() dataSourceMock.setAddress(CID) - // Şimdi dataSource işleyicisine geçirmek için Bytes oluşturmalıyız - // Bunun yerel bir json dosyasını okuyan ve içeriğini Bytes olarak döndüren readFile fonksiyonunu kullanıyoruz + // Now we need to generate the Bytes to pass to the dataSource handler + // For this case we introduced a new function readFile, that reads a local json and returns the content as Bytes const content = readFile(`path/to/metadata.json`) handleMetadata(content) - // Şimdi bir TokenLockMetadata'nın oluşturulup oluşturulmadığını test edeceğiz + // Now we will test if a TokenLockMetadata was created const metadata = TokenLockMetadata.load(CID) assert.bigIntEquals(metadata!.endTime, BigInt.fromI32(1)) @@ -1290,29 +1289,29 @@ test('dosya/ipfs veri kaynağı oluşturma örneği', () => { ## Test Kapsamı -Subgraph geliştiricileri **Matchstick'i** kullanarak, yazılan birim testlerinin test kapsamını hesaplayacak bir komut dosyası çalıştırabilirler. +Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. -Test kapsama aracı derlenmiş test `wasm` ikililerini alır ve bunları daha sonra `subgraph.yaml` dosyasında tanımlanan işlevlerin çağrılıp çağrılmadığını görmek için kolayca incelenebilen `wat` dosyalarına dönüştürür. Kod kapsamı (ve bir bütün olarak test) AssemblyScript ve WebAssembly'de çok erken aşamalarda olduğundan, **Matchstick** dallanma kapsamını kontrol edemez. Bunun yerine, belirli bir işleyici çağrılmışsa, bunun için olay/fonksiyonun uygun şekilde taklit edildiği savına güveniyoruz. +The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. -### Ön Koşullar +### Prerequisites -**Matchstick** tarafından sağlanan test kapsama fonksiyonlarını çalıştırmak için önceden hazırlamanız gereken birkaç şey bulunmaktadır: +To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: #### İşleyicilerinizi dışa aktarın -**Matchstick**'in hangi işleyicilerin çalıştığını kontrol etmesi için, bu işleyicilerin **test file**'dan dışa aktarılması gerekir. Mesela, bizim örneğimizde gravity.test.ts dosyamızda aşağıdaki işleyici içe aktarılır: +In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. 
So for instance in our example, in our gravity.test.ts file we have the following handler being imported: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -Bu fonksiyonun görünür olması için (**adıyla** `wat` dosyasına dahil edilmesi için) ayrıca onuda şöyle dışa aktarmamız gerekmektedir: +In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: ```typescript export { handleNewGravatar } ``` -### Kullanış +### Usage Her şey hazır olduğunda, test kapsama aracını çalıştırmak için basitçe şunu çalıştırın: @@ -1320,7 +1319,7 @@ Her şey hazır olduğunda, test kapsama aracını çalıştırmak için basitç graph test -- -c ``` -Ayrıca `package.json` dosyanıza şu şekilde özel bir kapsama(`coverage`) komutu ekleyebilirsiniz: +You could also add a custom `coverage` command to your `package.json` file, like so: ```typescript "scripts": { @@ -1378,7 +1377,7 @@ Tutulan kayıt çıktısı test çalışma süresini içerir. İşte buna bir ö > Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined -Bu hata kodunuzda `console.log` kullandığınız anlamına gelir. Bu komut AssemblyScript tarafından desteklenmez. Lütfen [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api)'yi kullanmayı düşünün. +This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api) > ERROR TS2554: Expected ? arguments, but got ?. > @@ -1392,7 +1391,7 @@ Bu hata kodunuzda `console.log` kullandığınız anlamına gelir. Bu komut Asse > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -Argümanlardaki uyumsuzluk, `graph-ts` ve `matchstick-as` arasındaki uyumsuzluktan kaynaklanır. Bu gibi sorunları düzeltmenin en iyi yolu her şeyi en son yayınlanan sürüme güncellemektir. +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. ## Ek Kaynaklar From d523b9e923129776bbc257b6da69e6c123be920a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:15 -0500 Subject: [PATCH 0210/1534] New translations unit-testing-framework.mdx (Ukrainian) --- .../creating/unit-testing-framework.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/uk/subgraphs/developing/creating/unit-testing-framework.mdx index 591b55feda38..78df2c601459 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/unit-testing-framework.mdx @@ -2,7 +2,7 @@ title: Unit Testing Framework --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. 
## Benefits of Using Matchstick

@@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar", () => {
-  afterAll(() => {
+  afterAll(() => {
    store.remove("Gravatar", "0x1")
    ...
-  })
+  })

  test("It creates a new entity with Id 0x0", () => {
    ...

@@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => {
    gravatar.save()
  })

-  test('Upates the displayName', () => {
+  test('Updates the displayName', () => {
    assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar')

    // code that should update the displayName to 1st Gravatar

@@ -441,7 +441,7 @@ describe("handleNewGravatar", () => {
})

describe("handleUpdatedGravatar", () => {
-  test("Upates the displayName", () => {
+  test("Updates the displayName", () => {
    assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar")

    // code that should update the displayName to 1st Gravatar

@@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => {
    store.remove("Gravatar", "0x0")
  })

-  test("Upates the displayName", () => {
+  test("Updates the displayName", () => {
    assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar")

    // code that should update the displayName to 1st Gravatar

@@ -603,7 +603,7 @@ export function createNewGravatarEvent(
  newGravatarEvent.parameters = new Array()
  let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id))
  let addressParam = new ethereum.EventParam(
-    'ownderAddress',
+    'ownerAddress',
    ethereum.Value.fromAddress(Address.fromString(ownerAddress)),
  )
  let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName))

@@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => {

#### Testing `file/ipfs` templates

-Similarly to contract dynamic data sources, users can test test file datas sources and their handlers
+Similarly to contract dynamic data sources, users can test file data sources and their handlers

##### Example `subgraph.yaml`

@@ -1393,7 +1393,7 @@ This means you have used `console.log` in your code, which is not supported by A

The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version.

-## Additional Resources
+## Додаткові матеріали

For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_).

From 67dea8c3abde9e5faedb059387f68765b71f7a84 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:47:17 -0500
Subject: [PATCH 0211/1534] New translations unit-testing-framework.mdx (Chinese Simplified)

---
 .../creating/unit-testing-framework.mdx | 118 +++++++++---------
 1 file changed, 59 insertions(+), 59 deletions(-)

diff --git a/website/src/pages/zh/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/zh/subgraphs/developing/creating/unit-testing-framework.mdx
index b077795de1d4..fb9703c0fdff 100644
--- a/website/src/pages/zh/subgraphs/developing/creating/unit-testing-framework.mdx
+++ b/website/src/pages/zh/subgraphs/developing/creating/unit-testing-framework.mdx
@@ -2,7 +2,7 @@
title: 单元测试框架
---

-Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs.
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs.

## Benefits of Using Matchstick

Installation command:

```sh
brew install postgresql
```

-创建到最新 libpq.5. lib* 的符号链接,可能需要首先创建这个目录*`/usr/local/opt/postgreql/lib/`
+Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/`

```sh
ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib
```

static BYTES = Symbol("Bytes") SyntaxError: Unexpected token =
或者

```
->/node_modules/gluegun/build/index.js:13 抛出;
+/node_modules/gluegun/build/index.js:13 throw up;
```

-请确保您使用的是新版本的 Node.js graph-cli 不再支持 **v10.19.0**,而且这仍然是 WSL 上新 Ubuntu 映像的默认版本。例如,已经证实Matchstick 可以在使用 **v18.1.0**的 WSL 上工作,您可以通过 **nvm** 或者更新全局 Node.js 切换到 Matchstick。不要忘记删除 `node _ module`,并在更新 nodejs 之后再次运行 `npm install`!然后,确保已经安装了 **libpq**,可以通过运行
+Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating your Node.js! Then, make sure you have **libpq** installed, you can do that by running

```
sudo apt-get install libpq-dev
```

-最后,不要使用`graph test` (使用全局安装的 graph-cli,并且由于某些原因,它看起来像是在 WSL 上已经坏掉了) ,而是使用`yarn test` 或 `npm run test`(这将使用本地的项目级的 graph-cli 实例,它的工作原理非常有趣。)为此,您当然需要在 `package.json` 文件中有一个`“ test”`脚本,它可以简单到
+And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as

```json
{
  "name": "demo-subgraph",
  "version": "0.1.0",
  "scripts": {
    "test": "graph test",
    ...
  },
  ...
}
```

### Using Matchstick

-要在子图项目中使用**Matchstick**,只需打开一个终端,导航到项目的根文件夹,然后简单地运行`graph test [options] `-它下载最新的**Matchstick**二进制文件,并在测试文件夹中运行指定的测试或所有测试(如果未指定数据源标志,则运行所有现有测试)。
+To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified).

### CLI 选项

```sh
graph test Gravity
```

```sh
graph test path/to/file.test.ts
```

-**选项**
+**Options:**

```sh
-c, --coverage                Run the tests in coverage mode
-d, --docker                  Run the tests in a docker container (Note: Please execute from the root folder of the subgraph)
-f, --force                   Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image.
-h, --help                    Show usage information
-l, --logs                    Logs to the console information about the OS, CPU model and download url (debugging purposes)
-r, --recompile               Forces tests to be recompiled
-v, --version <tag>           Choose the version of the rust binary that you want to be downloaded/used
```

### Docker

-从`graph cli 0.25.2`中,`graph test`命令支持在带有`-d`标志的docker容器中运行`matchstick` 。docker实现使用[bind mount](https://docs.docker.com/storage/bind-mounts/),因此它不必在每次执行`graph test-d`命令时重新构建docker映像。或者,您可以按照[matchstick](https://github.com/LimeChain/matchstick#docker-)存储库中的说明手动运行docker。
+From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually.

❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI).

-❗如果您以前运行过`graph test`,则在docker构建过程中可能会遇到以下错误:
+❗ If you have previously ran `graph test` you may encounter the following error during docker build:

```sh
error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied
```

-在本例中,在根文件夹中创建一个`.dockerignore`,并添加`node_modules/bibinary install raw/bin`。
+In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin`

### 配置

-Matchstick可以通过`Matchstick.yaml`配置文件配置为使用自定义测试、库和清单路径:
+Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file:

```yaml
testsFolder: path/to/tests
libsFolder: path/to/libs
manifestPath: path/to/subgraph.yaml
```

### 演示子图

-您可以通过克隆[Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph)来尝试并使用本指南中的示例。
+You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph)

### 视频教程

-此外,您还可以查看[“如何使用Matchstick为子图编写单元测试”系列视频](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h)。
+Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h)

## Tests structure

_**IMPORTANT: The test structure described below depends on `matchstick-as` version >=0.5.0**_

### 描述()

-`describe(name: String , () => {})` - 定义测试组。
+`describe(name: String , () => {})` - Defines a test group.

-**_注意:_**
+**_Notes:_**

-_描述不是强制性的。您仍然可以在describe()区块之外以旧的方式使用test()_
+_Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_

例子:

```typescript
import { describe, test } from "matchstick-as/assembly/index"
import { handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar()", () => {
  test("Should create a new Gravatar entity", () => {
    ...
  })
})
```

-嵌套`describe()` 示例:
+Nested `describe()` example:

```typescript
import { describe, test } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar } from "../../src/gravity"

describe("handleUpdatedGravatar()", () => {
  describe("When entity exists", () => {
    test("updates the entity", () => {
      ...
    })
  })

  describe("When entity does not exists", () => {
    test("it creates a new entity", () => {
      ...
    })
  })
})
```

### 测试()

-`test(name: String, () =>, should_fail: bool)` -定义测试用例。您可以在describe()区块内部或独立使用test()。
+`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently.

例子:

```typescript
import { describe, test } from "matchstick-as/assembly/index"
import { handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar()", () => {
  test("Should create a new Entity", () => {
    ...
  })
})
```

或者

```typescript
test("handleNewGravatar() should create a new entity", () => {
  ...
})
```

### beforeAll()

-在文件中的任何测试之前运行代码区块。如果`beforeAll`在`描述`区块内声明,它将在该`描述`区块的开头运行。
+Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block.

例子:

-`beforeAll`中的代码将在文件中的*all*测试之前执行一次。
+Code inside `beforeAll` will execute once before _all_ tests in the file.
```typescript
import { describe, test, beforeAll } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"
import { Gravatar } from "../../generated/schema"

beforeAll(() => {
  let gravatar = new Gravatar("0x0")
  gravatar.displayName = "First Gravatar"
  gravatar.save()
  ...
})

describe("When the entity does not exist", () => {
  test("it should create a new Gravatar with id 0x1", () => {
    ...
  })
})

describe("When entity already exists", () => {
  test("it should update the Gravatar with id 0x0", () => {
    ...
  })
})
```

-`beforeAll`中的代码将在第一个描述区块中的所有测试之前执行一次
+Code inside `beforeAll` will execute once before all tests in the first describe block

```typescript
import { describe, test, beforeAll } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"
import { Gravatar } from "../../generated/schema"

describe("handleUpdatedGravatar()", () => {
  beforeAll(() => {
    let gravatar = new Gravatar("0x0")
    gravatar.displayName = "First Gravatar"
    gravatar.save()
    ...
  })

  test("updates Gravatar with id 0x0", () => {
    ...
  })

  test("creates new Gravatar with id 0x1", () => {
    ...
  })
})
```

### afterAll()

-在文件中的所有测试之后运行代码区块。如果`afterAll`在`describe`区块内声明,它将在该`describe`区块的末尾运行。
+Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block.

例子:

-`afterAll`中的代码将在文件中的*all*测试之后执行一次。
+Code inside `afterAll` will execute once after _all_ tests in the file.

```typescript
import { describe, test, afterAll } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"
import { store } from "@graphprotocol/graph-ts"

afterAll(() => {
  store.remove("Gravatar", "0x0")
  ...
})

describe("handleNewGravatar", () => {
  test("creates Gravatar with id 0x0", () => {
    ...
  })
})

describe("handleUpdatedGravatar", () => {
  test("updates Gravatar with id 0x0", () => {
    ...
  })
})
```

-`afterAll`中的代码将在第一个描述区块中的所有测试之后执行一次
+Code inside `afterAll` will execute once after all tests in the first describe block

```typescript
import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar", () => {
  afterAll(() => {
    store.remove("Gravatar", "0x1")
    ...
  })

  test("It creates a new entity with Id 0x0", () => {
    ...
  })

  test("It creates a new entity with Id 0x1", () => {
    ...
  })
})

describe("handleUpdatedGravatar", () => {
  test("updates Gravatar with id 0x0", () => {
    ...
  })
})
```

### beforeEach()

-在每次测试之前运行代码块。如果`beforeEach`在`describe`区块中声明,则它在该`describe`块中的每个测试之前运行。
+Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block.

-示例:`beforeEach`内部的代码将在每次测试之前执行。
+Examples: Code inside `beforeEach` will execute before each test.

```typescript
import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index"
import { handleNewGravatars } from "./utils"

beforeEach(() => {
  clearStore() // <-- clear the store before each test in the file
})

describe("handleNewGravatars", () => {
  test("A test that requires a clean store", () => {
    ...
  })

  test("Second that requires a clean store", () => {
    ...
  })
})

...
```

-`beforeEach`中的代码将仅在描述中的每个测试之前执行
+Code inside `beforeEach` will execute only before each test in that describe

```typescript
import { describe, test, beforeEach } from 'matchstick-as/assembly/index'
import { handleUpdatedGravatar, handleNewGravatar } from '../../src/gravity'

describe('handleUpdatedGravatars', () => {
  beforeEach(() => {
    let gravatar = new Gravatar('0x0')
    gravatar.displayName = 'First Gravatar'
    gravatar.save()
  })

-  test('Upates the displayName', () => {
+  test('Updates the displayName', () => {
    assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar')

    // code that should update the displayName to 1st Gravatar

    assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar')
    store.remove('Gravatar', '0x0')
  })

  test('Keeps the imageUrl', () => {
    assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '')

    // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0

    assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0')
    store.remove('Gravatar', '0x0')
  })
})
```

### afterEach()

-在每次测试后运行代码区块。如果`afterEach`在`describe` 区块中声明,则在该`describe` 区块中的每个测试之后运行。
+Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block.

例子:

-`afterEach`内部的代码将在每次测试后执行。
+Code inside `afterEach` will execute after every test.
```typescript
import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar } from "../../src/gravity"

beforeEach(() => {
  let gravatar = new Gravatar("0x0")
  gravatar.displayName = "First Gravatar"
  gravatar.save()
})

afterEach(() => {
  store.remove("Gravatar", "0x0")
})

describe("handleNewGravatar", () => {
  ...
})

describe("handleUpdatedGravatar", () => {
-  test("Upates the displayName", () => {
+  test("Updates the displayName", () => {
    assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar")

    // code that should update the displayName to 1st Gravatar

    assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar")
  })

  test("Keeps the imageUrl", () => {
    assert.fieldEquals("Gravatar", "0x0", "imageUrl", "")

    // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0

    assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0")
  })
})
```

-`AfterEach`中的代码将仅在描述中的每个测试之后执行
+Code inside `afterEach` will execute after each test in that describe

```typescript
import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar", () => {
  ...
})

describe("handleUpdatedGravatar", () => {
  beforeEach(() => {
    let gravatar = new Gravatar("0x0")
    gravatar.displayName = "First Gravatar"
    gravatar.save()
  })

  afterEach(() => {
    store.remove("Gravatar", "0x0")
  })

-  test("Upates the displayName", () => {
+  test("Updates the displayName", () => {
    assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar")

    // code that should update the displayName to 1st Gravatar

    assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar")
  })

  test("Keeps the imageUrl", () => {
    assert.fieldEquals("Gravatar", "0x0", "imageUrl", "")

    // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0

    assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0")
  })
})
```

## Asserts

```typescript
assert.fieldEquals(
  entityType: string,
  id: string,
  fieldName: string,
  expectedVal: string,
)

...

assert.dataSourceExists(
  template: string,
  address: string,
)
```

## 编写一个单元测试

-让我们看看使用[Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts)中的Gravatar示例进行简单的单元测试的样子。
+Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts).

假设我们有以下处理程序函数(以及两个帮助函数,以使我们的生活更轻松):

```typescript
export function handleNewGravatar(event: NewGravatar): void {
  let gravatar = new Gravatar(event.params.id.toHex())
  gravatar.owner = event.params.owner
  gravatar.displayName = event.params.displayName
  gravatar.imageUrl = event.params.imageUrl
  gravatar.save()
}

export function handleNewGravatars(events: NewGravatar[]): void {
  events.forEach((event) => {
    handleNewGravatar(event)
  })
}

export function createNewGravatarEvent(
  id: i32,
  ownerAddress: string,
  displayName: string,
  imageUrl: string,
): NewGravatar {
  let mockEvent = newMockEvent()
  let newGravatarEvent = new NewGravatar(
    mockEvent.address,
    mockEvent.logIndex,
    mockEvent.transactionLogIndex,
    mockEvent.logType,
    mockEvent.block,
    mockEvent.transaction,
    mockEvent.parameters,
  )
  newGravatarEvent.parameters = new Array()
  let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id))
  let addressParam = new ethereum.EventParam(
-    'ownderAddress',
+    'ownerAddress',
    ethereum.Value.fromAddress(Address.fromString(ownerAddress)),
  )
  let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName))
  let imageUrlParam = new ethereum.EventParam('imageUrl', ethereum.Value.fromString(imageUrl))

  newGravatarEvent.parameters.push(idParam)
  newGravatarEvent.parameters.push(addressParam)
  newGravatarEvent.parameters.push(displayNameParam)
  newGravatarEvent.parameters.push(imageUrlParam)

  return newGravatarEvent
}
```

```typescript
import { clearStore, test, assert } from 'matchstick-as/assembly/index'
import { Gravatar } from '../../generated/schema'
import { NewGravatar } from '../../generated/Gravity/Gravity'
import { createNewGravatarEvent, handleNewGravatars } from './mapping'

test('Can call mappings with custom events', () => {
  // Create a test entity and save it in the store as initial state (optional)
  let gravatar = new Gravatar('gravatarId0')
  gravatar.save()

  // Create mock events
  let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac')
  let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac')

  // Call mapping functions passing the events we just created
  handleNewGravatars([newGravatarEvent, anotherGravatarEvent])

  // Assert the state of the store
  assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0')
  assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7')
  assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap')

  // Clear the store in order to start the next test off on a clean slate
  clearStore()
})

test('Next test', () => {
  //...
})
```

-这太多了!首先,需要注意的一件重要事情是,我们将从`matchstick-as`中导入东西,作为我们的AssemblyScript助手库(作为npm模块分发)。您可以在[此处](https://github.com/LimeChain/matchstick-as)找到存储库。`matchstick as`为我们提供了有用的测试方法,还定义了我们将用来构建测试块的`test()`函数。剩下的部分很简单——下面是发生的事情:
+That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens:

- 我们正在设置我们的初始状态并添加一个自定义的 Gravatar 实体。
-我们使用`createNewGravatarEvent()`函数定义了两个`NewGravatar`事件对象以及它们的数据。
+We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function;
-我们正在为这些事件调用处理方法-`-handleNewGravatars()`,并传入我们的自定义事件列表。
+We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events;
- 我们断定存储的状态。那是怎么实现的呢?- 我们传递一个实体类型和 id 的唯一组合。然后我们检查该实体的一个特定字段,并断定它具有我们期望的值。我们为我们添加到存储的初始 Gravatar 实体,以及当处理函数被调用时被添加的两个 Gravatar 实体都做这个。
-最后--我们用`clearStore()`清理存储,这样我们的下一个测试就可以从一个新的空存储对象开始。我们可以定义任意多的测试块。
+And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. A condensed sketch of this flow is shown right after this list.
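To make the flow in the list above easier to scan, here is a condensed arrange-act-assert skeleton (an editor's sketch, not part of this patch — it reuses the demo subgraph's `createNewGravatarEvent()`/`handleNewGravatars()` helpers shown earlier, and assumes they are in scope):

```typescript
import { assert, clearStore, test } from 'matchstick-as/assembly/index'
// Assumption: the helpers below live next to this test file, as in the demo subgraph
import { createNewGravatarEvent, handleNewGravatars } from './mapping'

test('Condensed arrange-act-assert flow', () => {
  // Arrange: build a mocked event with the same values as the example above
  let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac')

  // Act: run the handler under test against the mocked event
  handleNewGravatars([newGravatarEvent])

  // Assert: check the store state, then reset it so the next test starts clean
  assert.fieldEquals('Gravatar', '0x3039', 'displayName', 'cap') // 0x3039 is 12345 as hex
  clearStore()
})
```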
好了,我们创建了第一个测试!👏

现在为了运行测试,您需要在子图根文件夹中运行以下命令:

`graph test Gravity`

如果一切顺利,您应该会收到以下信息:

-![Matchstick写着“所有测试都通过了!”](/img/matchstick-tests-passed.png)
+![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png)

## 常见测试场景

@@ -754,9 +754,9 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri

### 模拟IPFS文件(from matchstick 0.4.1)

-用户可以使用`mockIpfsFile(hash, filePath)`函数模拟IPFS文件。该函数接受两个参数,第一个是IPFS文件hash/路径,第二个是本地文件的路径。
+Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file.

-注意:在测试`ipfs.map/ipfs.mapJSON`,时,必须从测试文件中导出回调函数,以便matchstck检测到它,如下面测试示例中的`processGravatar()` 函数:
+NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstick to detect it, like the `processGravatar()` function in the test example below:

`.test.ts` file:

```typescript
import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index'
import { ipfs } from '@graphprotocol/graph-ts'
import { gravatarFromIpfs } from './utils'

// Export ipfs.map() callback in order for matchstick to detect it
export { processGravatar } from './utils'

test('ipfs.cat', () => {
  mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json')

  assert.entityCount(GRAVATAR_ENTITY_TYPE, 0)

  gravatarFromIpfs()

  assert.entityCount(GRAVATAR_ENTITY_TYPE, 1)
  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'imageUrl', 'https://i.ytimg.com/vi/MELP46s8Cic/maxresdefault.jpg')

  clearStore()
})

test('ipfs.map', () => {
  mockIpfsFile('ipfsMapfileHash', 'tests/ipfs/map.json')

  assert.entityCount(GRAVATAR_ENTITY_TYPE, 0)

  ipfs.map('ipfsMapfileHash', 'processGravatar', Value.fromString('Gravatar'), ['json'])

  assert.entityCount(GRAVATAR_ENTITY_TYPE, 3)
  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'displayName', 'Gravatar1')
  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '2', 'displayName', 'Gravatar2')
  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '3', 'displayName', 'Gravatar3')
})
```

`utils.ts` file:

```typescript
import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts"
import { Gravatar } from "../../generated/schema"

...

// ipfs.map callback
export function processGravatar(value: JSONValue, userData: Value): void {
  // See the JSONValue documentation for details on dealing
  // with JSON values
  let obj = value.toObject()
  let id = obj.get('id')

  if (!id) {
    return
  }

  // Callbacks can also created entities
  let gravatar = new Gravatar(id.toString())
  gravatar.displayName = userData.toString() + id.toString()
  gravatar.save()
}

// function that calls ipfs.cat
export function gravatarFromIpfs(): void {
  let rawData = ipfs.cat("ipfsCatfileHash")

  if (!rawData) {
    return
  }

  let jsonData = json.fromBytes(rawData as Bytes).toObject()

  let id = jsonData.get('id')
  let url = jsonData.get("imageUrl")

  if (!id || !url) {
    return
  }

  let gravatar = new Gravatar(id.toString())
  gravatar.imageUrl = url.toString()
  gravatar.save()
}
```

gravatar.save()

assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0')
```

-运行assert.fieldEquals()函数将检查给定字段是否与给定的预期值相等。如果值**不**相等,测试将失败,并输出错误消息。否则,测试将成功通过。
+Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully.

### 与事件元数据交互

-用户可以使用默认的交易元数据,该元数据可以通过使用`newMockEvent()`函数作为ethereum.Event返回。以下示例显示了如何读取/写入Event对象上的这些字段:
+Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object:

```typescript
// Read
let logType = newGravatarEvent.logType

// Write
let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A'
newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)
```

### Asserting variable equality

```typescript
assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello"));
```

-### 断定实体**不**在存储中
+### Asserting that an Entity is **not** in the store

用户可以断定实体在存储中不存在。该函数接受实体类型和id。如果实体实际上在存储中,测试将失败,并显示相关错误消息。以下是如何使用此功能的快速示例:

@@ -1040,7 +1040,7 @@ describe('loadInBlock', () => {

### 测试动态数据源

-可以通过模拟dataSource命名空间的`context()`, `address()` 和 `network()` 函数的返回值来测试动态数据源。这些函数当前返回以下内容:`context()` -返回一个空实体(DataSourceContext),`address()`返回`0x0000000000000000000000000000000000000000`, `network()` - 返回`mainnet`。`create(...)` 和 `createWithContext(...)`函数被模拟为什么都不做,因此根本不需要在测试中调用它们。返回值的更改可以通过`matchstick-as` (版本 0.3.0+) 中`dataSourceMock`命名空间的函数来完成。
+Testing dynamic data sources can be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+).
示例如下:

@@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => {

#### Testing `file/ipfs` templates

-Similarly to contract dynamic data sources, users can test test file datas sources and their handlers
+Similarly to contract dynamic data sources, users can test file data sources and their handlers

##### Example `subgraph.yaml`

## 测试覆盖率

-使用**Matchstick**,子图开发者可以运行一个脚本,计算编写的单元测试的测试覆盖率。
+Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests.

-测试覆盖工具非常简单——它接受编译后的测试`wasm`二进制文件并将其转换为`wat`文件,然后可以很容易地检查这些文件,以查看`submap.yaml`中定义的处理程序是否真正被调用。由于代码覆盖率(以及整个测试)在AssemblyScript和WebAssembly中处于非常早期的阶段,**Matchstick**无法检查分支覆盖率。相反,我们依赖于这样一个断定,即如果调用了给定的处理程序,那么它的事件/函数就会被正确地模拟。
+The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked.

-### 先决条件
+### Prerequisites

-要运行**Matchstick**中提供的测试覆盖功能,您需要事先准备以下几件事:
+To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand:

#### 导出处理程序

-为了让**Matchstick**检查正在运行的处理程序,需要从**测试文件**中导出这些处理程序。例如,在我们的示例中,在gravity.test.ts文件中,我们导入了以下处理程序
+In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported:

```typescript
import { handleNewGravatar } from '../../src/gravity'
```

-为了使该函数可见(使其包含在 `wat` 文件中**按名称**),我们还需要导出它,例如这:
+In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this:

```typescript
export { handleNewGravatar }
```

-### 使用方法
+### Usage

设置好后,要运行测试覆盖工具,只需运行:

```sh
graph test -- -c
```

-您还可以向`package.json`文件中添加自定义`覆盖率`命令,如下所示:
+You could also add a custom `coverage` command to your `package.json` file, like so:

```typescript
 "scripts": {
    /.../
    "coverage": "graph test -- -c"
  },
```

Global test coverage: 22.2% (2/9 handlers).

日志输出包括测试运行持续时间。下面是一个示例:

-`[2022 年 3 月 31 日星期四 13:54:54 +0300] 程序执行时间:42.270 毫秒。`
+`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.`

## 常见编译器错误

> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined

This means you have used `console.log` in your code, which is not supported by AssemblyScript.

> ERROR TS2554: Expected ? arguments, but got ?.
>
> return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, ...)
>
> in ~lib/matchstick-as/assembly/defaults.ts(18,12)
>
> ERROR TS2554: Expected ? arguments, but got ?.
>
> return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddressBytes, ...)
>
> in ~lib/matchstick-as/assembly/defaults.ts(24,12)

-参数不匹配是由`graph-ts` and `matchstick-as`不匹配造成的。解决此类问题的最佳方法是将所有内容更新到最新发布的版本。
+The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version.
## 其他资源

For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_).

From 20d83830603f4e6f854ae016d05c1b215fdf79a8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:47:18 -0500
Subject: [PATCH 0212/1534] New translations unit-testing-framework.mdx (Urdu (Pakistan))

---
 .../creating/unit-testing-framework.mdx | 116 +++++++++---------
 1 file changed, 58 insertions(+), 58 deletions(-)

diff --git a/website/src/pages/ur/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/ur/subgraphs/developing/creating/unit-testing-framework.mdx
index ecead46a151d..ba6feb650a07 100644
--- a/website/src/pages/ur/subgraphs/developing/creating/unit-testing-framework.mdx
+++ b/website/src/pages/ur/subgraphs/developing/creating/unit-testing-framework.mdx
@@ -2,7 +2,7 @@
title: یونٹ ٹیسٹنگ فریم ورک
---

-Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs.
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs.

## Benefits of Using Matchstick

Installation command:

```sh
brew install postgresql
```

-تازہ ترین libpq.5.lib کا ایک سملنک بنائیں _آپ کو پہلے یہ ڈائر بنانے کی ضرورت پڑسکتی ہے_ `/usr/local/opt/postgresql/lib/`
+Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/`

```sh
ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib
```

static BYTES = Symbol("Bytes") SyntaxError: Unexpected token =
/node_modules/gluegun/build/index.js:13 throw up;

-براہ کرم یقینی بنائیں کہ آپ Node.js graph-cli کے نئے ورژن پر ہیں اب **v10.19.0** کو سپورٹ نہیں کرتا ہے، اور یہ اب بھی نئے Ubuntu کا ڈیفالٹ ورژن ہے۔ WSL پر تصاویر۔ مثال کے طور پر میچ اسٹک کے WSL پر **v18.1.0** کے ساتھ کام کرنے کی تصدیق ہوئی ہے، آپ **nvm** کے ذریعے اس پر سوئچ کر سکتے ہیں۔ یا اگر آپ اپنے عالمی Node.js کو اپ ڈیٹ کرتے ہیں۔ `node_modules` کو حذف کرنا اور nodejs کو اپ ڈیٹ کرنے کے بعد دوبارہ `npm install` چلانا نہ بھولیں! پھر، یقینی بنائیں کہ آپ نے **libpq** انسٹال کر رکھا ہے، آپ اسے چلا کر کر سکتے ہیں
+Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating your Node.js! Then, make sure you have **libpq** installed, you can do that by running

```
sudo apt-get install libpq-dev
```

-اور آخر میں، `graph test` کا استعمال نہ کریں (جو آپ کے graph-cli کی عالمی تنصیب کا استعمال کرتا ہے اور کسی وجہ سے ایسا لگتا ہے کہ یہ فی الحال WSL پر ٹوٹ گیا ہے)، اس کے بجائے `yarn test` کا استعمال کریں۔ یا `npm run test` (جو graph-cli کی مقامی، پروجیکٹ لیول مثال استعمال کرے گا، جو ایک دلکش کی طرح کام کرتا ہے)۔ اس کے لیے یقیناً آپ کو اپنی `package.json` فائل میں ایک `"test"` اسکرپٹ کی ضرورت ہوگی جو اتنی ہی آسان ہوسکتی ہے
+And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as

```json
{
  "name": "demo-subgraph",
  "version": "0.1.0",
  "scripts": {
    "test": "graph test",
    ...
  },
  ...
}
```

### Using Matchstick

-اپنے سب گراف پروجیکٹ میں **Matchstick** استعمال کرنے کے لیے بس ایک ٹرمینل کھولیں، اپنے پروجیکٹ کے روٹ فولڈر پر جائیں اور بس `graph test [options] ` چلائیں - یہ تازہ ترین **Matchstick** بائنری ڈاؤن لوڈ کرتا ہے اور مخصوص ٹیسٹ یا ٹیسٹ فولڈر میں تمام ٹیسٹ چلاتا ہے (یا تمام موجودہ ٹیسٹ اگر کوئی ڈیٹا سورس پرچم متعین نہیں ہے).
+To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified).

### CLI کے اختیارات

```sh
graph test gravity
```

```sh
graph test path/to/file.test.ts
```

-**اختیارات:**
+**Options:**

```sh
-c, --coverage                Run the tests in coverage mode
-d, --docker                  Run the tests in a docker container (Note: Please execute from the root folder of the subgraph)
-f, --force                   Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image.
-h, --help                    Show usage information
-l, --logs                    Logs to the console information about the OS, CPU model and download url (debugging purposes)
-r, --recompile               Forces tests to be recompiled
-v, --version <tag>           Choose the version of the rust binary that you want to be downloaded/used
```

### ڈوکر

-`graph-cli 0.25.2` سے، `graph test` کمانڈ `-d` فلیگ کے ساتھ ڈوکر کنٹینر میں `matchstick` چلانے کی حمایت کرتی ہے۔ ڈوکر کا نفاذ [بائنڈ ماؤنٹ](https://docs.docker.com/storage/bind-mounts/) کا استعمال کرتا ہے لہذا جب بھی `graph test -d` کمانڈ پر عمل ہوتا ہے اسے ڈوکر امیج کو دوبارہ بنانے کی ضرورت نہیں ہے۔ متبادل طور پر آپ ڈوکر کو دستی طور پر چلانے کے لیے [matchstick](https://github.com/LimeChain/matchstick#docker-) ریپوزٹری کی ہدایات پر عمل کر سکتے ہیں.
+From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually.

❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI).
-❗ اگر آپ نے پہلے `graph test` چلایا ہے تو آپ کو ڈوکر کی تعمیر کے دوران درج ذیل خرابی کا سامنا کرنا پڑ سکتا ہے:
+❗ If you have previously ran `graph test` you may encounter the following error during docker build:

```sh
error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied
```

-اس صورت میں روٹ فولڈر میں ایک `.dockerignore` بنائیں اور `node_modules/binary-install-raw/bin` شامل کریں
+In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin`

### کنفیگریشن

-Matchstick کو `matchstick.yaml` کنفگ فائل کے ذریعے اپنی مرضی کے ٹیسٹ، libs اور مینی فیسٹ پاتھ کو استعمال کرنے کے لیے کنفیگر کیا جا سکتا ہے:
+Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file:

```yaml
testsFolder: path/to/tests
libsFolder: path/to/libs
manifestPath: path/to/subgraph.yaml
```

### ڈیمو سب گراف

-آپ [ڈیمو سب گراف ریپو](https://github.com/LimeChain/demo-subgraph) کو کلون کرکے اس گائیڈ سے مثالیں آزما سکتے ہیں اور کھیل سکتے ہیں
+You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph)

### ویڈیو ٹیوٹوریلز

-اس کے علاوہ آپ ["اپنے سب گرافس کے یونٹ ٹیسٹ لکھنے کے لیے Matchstick کا استعمال کیسے کریں"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) پر ویڈیو سیریز بھی دیکھ سکتے ہیں
+Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h)

## Tests structure

_**IMPORTANT: The test structure described below depends on `matchstick-as` version >=0.5.0**_

### describe()

`describe(name: String , () => {})` - Defines a test group.

**_Notes:_**

-_بیانات لازمی نہیں ہیں۔ آپ describe() بلاکس کے باہر اب بھی test() پرانا طریقہ استعمال کر سکتے ہیں_
+_Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_

مثال:

```typescript
import { describe, test } from "matchstick-as/assembly/index"
import { handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar()", () => {
  test("Should create a new Gravatar entity", () => {
    ...
  })
})
```

Nested `describe()` example:

```typescript
import { describe, test } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar } from "../../src/gravity"

describe("handleUpdatedGravatar()", () => {
  describe("When entity exists", () => {
    test("updates the entity", () => {
      ...
    })
  })

  describe("When entity does not exists", () => {
    test("it creates a new entity", () => {
      ...
    })
  })
})
```

### test()

`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently.

مثال:

```typescript
import { describe, test } from "matchstick-as/assembly/index"
import { handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar()", () => {
  test("Should create a new Entity", () => {
    ...
  })
})
```

or

```typescript
test("handleNewGravatar() should create a new entity", () => {
  ...
})
```

### beforeAll()

Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block.

مثالیں:

-`foreAll` کے اندر کا کوڈ فائل میں _تمام_ ٹیسٹوں سے پہلے ایک بار عمل میں آئے گا.
+Code inside `beforeAll` will execute once before _all_ tests in the file.
```typescript
import { describe, test, beforeAll } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"
import { Gravatar } from "../../generated/schema"

beforeAll(() => {
  let gravatar = new Gravatar("0x0")
  gravatar.displayName = "First Gravatar"
  gravatar.save()
  ...
})

describe("When the entity does not exist", () => {
  test("it should create a new Gravatar with id 0x1", () => {
    ...
  })
})

describe("When entity already exists", () => {
  test("it should update the Gravatar with id 0x0", () => {
    ...
  })
})
```

-`beforeAll` کے اندر کا کوڈ پہلے وضاحتی بلاک میں تمام ٹیسٹوں سے پہلے ایک بار عمل میں آئے گا
+Code inside `beforeAll` will execute once before all tests in the first describe block

```typescript
import { describe, test, beforeAll } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"
import { Gravatar } from "../../generated/schema"

describe("handleUpdatedGravatar()", () => {
  beforeAll(() => {
    let gravatar = new Gravatar("0x0")
    gravatar.displayName = "First Gravatar"
    gravatar.save()
    ...
  })

  test("updates Gravatar with id 0x0", () => {
    ...
  })

  test("creates new Gravatar with id 0x1", () => {
    ...
  })
})
```

### afterAll()

Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block.

مثال:

-`afterAll` کے اندر کا کوڈ فائل میں _تمام_ ٹیسٹوں کے بعد ایک بار عمل میں آئے گا.
+Code inside `afterAll` will execute once after _all_ tests in the file.

```typescript
import { describe, test, afterAll } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"
import { store } from "@graphprotocol/graph-ts"

afterAll(() => {
  store.remove("Gravatar", "0x0")
  ...
})

describe("handleNewGravatar", () => {
  test("creates Gravatar with id 0x0", () => {
    ...
  })
})

describe("handleUpdatedGravatar", () => {
  test("updates Gravatar with id 0x0", () => {
    ...
  })
})
```

-`AfterAll` کے اندر کوڈ پہلے وضاحتی بلاک میں تمام ٹیسٹوں کے بعد ایک بار عمل میں آئے گا
+Code inside `afterAll` will execute once after all tests in the first describe block

```typescript
import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar", () => {
  afterAll(() => {
    store.remove("Gravatar", "0x1")
    ...
  })

  test("It creates a new entity with Id 0x0", () => {
    ...
  })

  test("It creates a new entity with Id 0x1", () => {
    ...
  })
})

describe("handleUpdatedGravatar", () => {
  test("updates Gravatar with id 0x0", () => {
    ...
  })
})
```

### beforeEach()

Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block.

-مثالیں: `beforeEach` کے اندر کوڈ ہر ٹیسٹ سے پہلے عمل میں آئے گا.
+Examples: Code inside `beforeEach` will execute before each test.

```typescript
import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index"
import { handleNewGravatars } from "./utils"

beforeEach(() => {
  clearStore() // <-- clear the store before each test in the file
})

describe("handleNewGravatars", () => {
  test("A test that requires a clean store", () => {
    ...
  })

  test("Second that requires a clean store", () => {
    ...
  })
})

...
```

-`beforeEach` کے اندر کا کوڈ صرف ہر ٹیسٹ سے پہلے اس کی وضاحت کرے گا
+Code inside `beforeEach` will execute only before each test in that describe

```typescript
import { describe, test, beforeEach } from 'matchstick-as/assembly/index'
import { handleUpdatedGravatar, handleNewGravatar } from '../../src/gravity'

describe('handleUpdatedGravatars', () => {
  beforeEach(() => {
    let gravatar = new Gravatar('0x0')
    gravatar.displayName = 'First Gravatar'
    gravatar.save()
  })

-  test('Upates the displayName', () => {
+  test('Updates the displayName', () => {
    assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar')

    // code that should update the displayName to 1st Gravatar

    assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar')
    store.remove('Gravatar', '0x0')
  })

  test('Keeps the imageUrl', () => {
    assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '')

    // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0

    assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0')
    store.remove('Gravatar', '0x0')
  })
})
```

### afterEach()

Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block.

مثالیں:

-`afterEach` کے اندر کا کوڈ ہر ٹیسٹ کے بعد چلے گا.
+Code inside `afterEach` will execute after every test.
```typescript
import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar } from "../../src/gravity"

beforeEach(() => {
  let gravatar = new Gravatar("0x0")
  gravatar.displayName = "First Gravatar"
  gravatar.save()
})

afterEach(() => {
  store.remove("Gravatar", "0x0")
})

describe("handleNewGravatar", () => {
  ...
})

describe("handleUpdatedGravatar", () => {
-  test("Upates the displayName", () => {
+  test("Updates the displayName", () => {
    assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar")

    // code that should update the displayName to 1st Gravatar

    assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar")
  })

  test("Keeps the imageUrl", () => {
    assert.fieldEquals("Gravatar", "0x0", "imageUrl", "")

    // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0

    assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0")
  })
})
```

-`afterEach` کے اندر کا کوڈ ہر ٹیسٹ کے بعد describe میں چلے گا
+Code inside `afterEach` will execute after each test in that describe

```typescript
import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index"
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar", () => {
  ...
})

describe("handleUpdatedGravatar", () => {
  beforeEach(() => {
    let gravatar = new Gravatar("0x0")
    gravatar.displayName = "First Gravatar"
    gravatar.save()
  })

  afterEach(() => {
    store.remove("Gravatar", "0x0")
  })

-  test("Upates the displayName", () => {
+  test("Updates the displayName", () => {
    assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar")

    // code that should update the displayName to 1st Gravatar

    assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar")
  })

  test("Keeps the imageUrl", () => {
    assert.fieldEquals("Gravatar", "0x0", "imageUrl", "")

    // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0

    assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0")
  })
})
```

## Asserts

```typescript
assert.fieldEquals(
  entityType: string,
  id: string,
  fieldName: string,
  expectedVal: string,
)

...

assert.dataSourceExists(
  template: string,
  address: string,
)
```

## یونٹ ٹیسٹ لکھیں

-آئیے دیکھتے ہیں کہ [ڈیمو سب گراف](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts) میں Gravatar کی مثالوں کا استعمال کرتے ہوئے ایک سادہ یونٹ ٹیسٹ کیسا لگے گا.
+Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts).

یہ فرض کرتے ہوئے کہ ہمارے پاس مندرجہ ذیل ہینڈلر فنکشن ہے (ہماری زندگی کو آسان بنانے کے لیے دو مددگار افعال کے ساتھ):

```typescript
export function handleNewGravatar(event: NewGravatar): void {
  let gravatar = new Gravatar(event.params.id.toHex())
  gravatar.owner = event.params.owner
  gravatar.displayName = event.params.displayName
  gravatar.imageUrl = event.params.imageUrl
  gravatar.save()
}

export function handleNewGravatars(events: NewGravatar[]): void {
  events.forEach((event) => {
    handleNewGravatar(event)
  })
}

export function createNewGravatarEvent(
  id: i32,
  ownerAddress: string,
  displayName: string,
  imageUrl: string,
): NewGravatar {
  let mockEvent = newMockEvent()
  let newGravatarEvent = new NewGravatar(
    mockEvent.address,
    mockEvent.logIndex,
    mockEvent.transactionLogIndex,
    mockEvent.logType,
    mockEvent.block,
    mockEvent.transaction,
    mockEvent.parameters,
  )
  newGravatarEvent.parameters = new Array()
  let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id))
  let addressParam = new ethereum.EventParam(
-    'ownderAddress',
+    'ownerAddress',
    ethereum.Value.fromAddress(Address.fromString(ownerAddress)),
  )
  let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName))
  let imageUrlParam = new ethereum.EventParam('imageUrl', ethereum.Value.fromString(imageUrl))

  newGravatarEvent.parameters.push(idParam)
  newGravatarEvent.parameters.push(addressParam)
  newGravatarEvent.parameters.push(displayNameParam)
  newGravatarEvent.parameters.push(imageUrlParam)

  return newGravatarEvent
}
```

```typescript
import { clearStore, test, assert } from 'matchstick-as/assembly/index'
import { Gravatar } from '../../generated/schema'
import { NewGravatar } from '../../generated/Gravity/Gravity'
import { createNewGravatarEvent, handleNewGravatars } from './mapping'

test('Can call mappings with custom events', () => {
  // Create a test entity and save it in the store as initial state (optional)
  let gravatar = new Gravatar('gravatarId0')
  gravatar.save()

  // Create mock events
  let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac')
  let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac')

  // Call mapping functions passing the events we just created
  handleNewGravatars([newGravatarEvent, anotherGravatarEvent])

  // Assert the state of the store
  assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0')
  assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7')
  assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap')

  // Clear the store in order to start the next test off on a clean slate
  clearStore()
})

test('Next test', () => {
  //...
})
```

-یہ پیک کھولنے کے لیے بہت کچھ ہے! سب سے پہلے، ایک اہم چیز جس کا نوٹس لیا جائے وہ یہ ہے کہ ہم اپنی اسمبلی اسکرپٹ مددگار لائبریری (npm ماڈیول کے طور پر تقسیم کردہ) `matchstick-as` سے چیزیں درآمد کر رہے ہیں۔ آپ ریپوزٹری کو [یہاں](https://github.com/LimeChain/matchstick-as) تلاش کر سکتے ہیں۔ `matchstick-as` ہمیں مفید جانچ کے طریقے فراہم کرتا ہے اور `test()` فنکشن کی بھی وضاحت کرتا ہے جسے ہم اپنے ٹیسٹ بلاکس بنانے کے لیے استعمال کریں گے۔ اس کا باقی حصہ بالکل سیدھا ہے - یہاں کیا ہوتا ہے:
+That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens:

- ہم اپنی ابتدائی حالت قائم کر رہے ہیں اور ایک حسب ضرورت Gravatar ہستی شامل کر رہے ہیں;
-ہم `createNewGravatarEvent()` فنکشن کا استعمال کرتے ہوئے، ان کے ڈیٹا کے ساتھ دو `NewGravatar` ایونٹ آبجیکٹ کی وضاحت کرتے ہیں;
+We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function;
-ہم ان واقعات کے لیے ہینڈلر کے طریقے بتا رہے ہیں - `handleNewGravatars()` اور ہمارے حسب ضرورت ایونٹس کی فہرست میں گزر رہے ہیں;
+We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events;
- ہم اسٹور کی حالت پر زور دیتے ہیں۔ یہ کیسے کام کرتا ہے؟ - ہم ہستی کی قسم اور آئی ڈی کا ایک انوکھا امتزاج پاس کر رہے ہیں۔ پھر ہم اس ہستی پر ایک مخصوص فیلڈ کو چیک کرتے ہیں اور اس بات پر زور دیتے ہیں کہ اس کی وہ قدر ہے جس کی ہم اس سے توقع رکھتے ہیں۔ ہم یہ دونوں ابتدائی Gravatar ہستی کے لیے کر رہے ہیں جسے ہم نے سٹور میں شامل کیا ہے، اور ساتھ ہی وہ دو Gravatar اداروں کے لیے جو ہینڈلر فنکشن کو کال کرنے پر شامل ہو جاتی ہیں;
-اور آخر میں - ہم `clearStore()` کا استعمال کرتے ہوئے سٹور کی صفائی کر رہے ہیں تاکہ ہمارا اگلا ٹیسٹ ایک تازہ اور خالی سٹور آبجیکٹ کے ساتھ شروع ہو سکے۔ ہم جتنے چاہیں ٹیسٹ بلاکس کی وضاحت کر سکتے ہیں.
+And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want.

ہم وہاں جاتے ہیں - ہم نے اپنا پہلا ٹیسٹ بنایا ہے! 👏

اب ہمارے ٹیسٹ چلانے کے لیے آپ کو اپنے سب گراف روٹ فولڈر میں درج ذیل کو چلانے کی ضرورت ہے:

`graph test Gravity`

اور اگر سب کچھ ٹھیک رہا تو آپ کو درج ذیل کے ساتھ خوش آمدید کہا جانا چاہئے:

-![Matchstick کہہ رہی ہے "تمام ٹیسٹ پاس ہو گئے!"](/img/matchstick-tests-passed.png)
+![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png)

## عام ٹیسٹ کے منظرنامے

@@ -754,9 +754,9 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri

### IPFS files کو موک کرنا (from matchstick 0.4.1)

-صارف `mockIpfsFile(hash, filePath)` فنکشن استعمال کرکے IPFS فائلوں کو موک کر سکتے ہیں۔ فنکشن دو دلائل کو قبول کرتا ہے، پہلا IPFS فائل ہیش/پاتھ ہے اور دوسرا مقامی فائل کا راستہ ہے.
+Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file.

-نوٹ: `ipfs.map/ipfs.mapJSON` کی جانچ کرتے وقت، کال بیک فنکشن کو ٹیسٹ فائل سے ایکسپورٹ کیا جانا چاہیے تاکہ میچسٹک اس کا پتہ لگا سکے، جیسے کہ `processGravatar()` فنکشن ذیل میں ٹیسٹ کی مثال میں:
+NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstick to detect it, like the `processGravatar()` function in the test example below:

`.test.ts` file:

```typescript
import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index'
import { ipfs } from '@graphprotocol/graph-ts'
import { gravatarFromIpfs } from './utils'

// Export ipfs.map() callback in order for matchstick to detect it
export { processGravatar } from './utils'

test('ipfs.cat', () => {
  mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json')

  assert.entityCount(GRAVATAR_ENTITY_TYPE, 0)

  gravatarFromIpfs()

  assert.entityCount(GRAVATAR_ENTITY_TYPE, 1)
  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'imageUrl', 'https://i.ytimg.com/vi/MELP46s8Cic/maxresdefault.jpg')

  clearStore()
})

test('ipfs.map', () => {
  mockIpfsFile('ipfsMapfileHash', 'tests/ipfs/map.json')

  assert.entityCount(GRAVATAR_ENTITY_TYPE, 0)

  ipfs.map('ipfsMapfileHash', 'processGravatar', Value.fromString('Gravatar'), ['json'])

  assert.entityCount(GRAVATAR_ENTITY_TYPE, 3)
  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'displayName', 'Gravatar1')
  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '2', 'displayName', 'Gravatar2')
  assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '3', 'displayName', 'Gravatar3')
})
```

`utils.ts` file:

```typescript
import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts"
import { Gravatar } from "../../generated/schema"

...

// ipfs.map callback
export function processGravatar(value: JSONValue, userData: Value): void {
  // See the JSONValue documentation for details on dealing
  // with JSON values
  let obj = value.toObject()
  let id = obj.get('id')

  if (!id) {
    return
  }

  // Callbacks can also created entities
  let gravatar = new Gravatar(id.toString())
  gravatar.displayName = userData.toString() + id.toString()
  gravatar.save()
}

// function that calls ipfs.cat
export function gravatarFromIpfs(): void {
  let rawData = ipfs.cat("ipfsCatfileHash")

  if (!rawData) {
    return
  }

  let jsonData = json.fromBytes(rawData as Bytes).toObject()

  let id = jsonData.get('id')
  let url = jsonData.get("imageUrl")

  if (!id || !url) {
    return
  }

  let gravatar = new Gravatar(id.toString())
  gravatar.imageUrl = url.toString()
  gravatar.save()
}
```

gravatar.save()

assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0')
```

-Assert.fieldEquals() فنکشن کو چلانے سے دی گئی فیلڈ کی دی گئی متوقع قدر کے مقابلے میں برابری کی جانچ ہوگی۔ ٹیسٹ ناکام ہو جائے گا اور اگر قدریں **NOT** برابر ہوں گی تو ایک خرابی کا پیغام نکلے گا۔ ورنہ امتحان کامیابی سے گزر جائے گا.
+Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully.

### ایونٹ میٹا ڈیٹا کے ساتھ تعامل کرنا

-صارفین ڈیفالٹ ٹرانزیکشن میٹا ڈیٹا استعمال کر سکتے ہیں، جسے ایتھریم کے طور پر واپس کیا جا سکتا ہے`newMockEvent()` ایونٹ کا استعمال کرنے ہوۓ ۔ درج ذیل مثال سے پتہ چلتا ہے کہ آپ ایونٹ آبجیکٹ پر ان فیلڈز کو کیسے پڑھ/لکھ سکتے ہیں:
+Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object:

```typescript
// Read
let logType = newGravatarEvent.logType

// Write
let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A'
newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)
```

### Asserting variable equality

```typescript
assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello"));
```

-### یہ دعویٰ کرنا کہ کوئی ہستی اسٹور میں **نہیں** ہے
+### Asserting that an Entity is **not** in the store

صارفین اس بات پر زور دے سکتے ہیں کہ اسٹور میں کوئی ہستی موجود نہیں ہے۔ فنکشن ایک ہستی کی قسم اور ایک شناخت لیتا ہے۔ اگر ہستی حقیقت میں اسٹور میں ہے تو، متعلقہ خرابی کے پیغام کے ساتھ ٹیسٹ ناکام ہو جائے گا۔ اس فعالیت کو استعمال کرنے کے طریقے کی ایک فوری مثال یہ ہے:

@@ -1040,7 +1040,7 @@ describe('loadInBlock', () => {

### متحرک ڈیٹا کے ذرائع کی جانچ کرنا

-متحرک ڈیٹا کے ذرائع کی جانچ `context()`، `address()` اور `network()` فنکشنز کی واپسی کی ویلیو کو موک کرتے ہوۓ کی جا سکتی ہے۔ ڈیٹا سورس نام کی جگہ۔ یہ فنکشنز فی الحال درج ذیل کو لوٹاتا ہے: `context()` - ایک خالی ہستی (DataSourceContext) واپس کرتا ہے، `address()` - `0x0000000000000000000000000000000000000000` لوٹاتا ہے, `network()` - `mainnet` لوٹاتا ہے۔ `create(...)` اور `createWithContext(...)` فنکشنز کو موک کیا جاتا ہے کہ وہ کچھ نہ کریں اس لیے انہیں ٹیسٹوں میں بلانے کی ضرورت نہیں ہے۔ واپسی کی ویلیوس میں تبدیلیاں `matchstick-as` (ورژن 0.3.0+) میں `dataSourceMock` نام کی جگہ کے فنکشنز کے ذریعے کی جا سکتی ہیں.
+Testing dynamic data sources can be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+).

ذیل کی مثال:

@@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => {

#### Testing `file/ipfs` templates

-Similarly to contract dynamic data sources, users can test test file datas sources and their handlers
+Similarly to contract dynamic data sources, users can test file data sources and their handlers

##### Example `subgraph.yaml`

@@ -1293,7 +1293,7 @@ Using **Matchstick**, subgraph developers are able to run a script that will cal

The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked.

-### شرطیں
+### Prerequisites

-**Matchstick** میں فراہم کردہ ٹیسٹ کوریج کی فعالیت کو چلانے کے لیے، آپ کو پہلے سے کچھ چیزیں تیار کرنے کی ضرورت ہے:
+To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand:

#### اپنے ہینڈلرز کو ایکسپورٹ کریں

-**Matchstick** کو چیک کرنے کے لیے کہ کون سے ہینڈلرز چلائے جا رہے ہیں، ان ہینڈلرز کو **ٹیسٹ فائل** سے ایکسپورٹ کرنے کی ضرورت ہے۔. تو مثال کے طور پر، ہماری gravity.test.ts فائل میں ہمارے پاس درج ذیل ہینڈلر درآمد کیا جا رہا ہے:
+In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported:

```typescript
import { handleNewGravatar } from '../../src/gravity'
```

-اس فنکشن کو نظر آنے کے لیے (اسے `wat` فائل میں شامل کیا جائے **نام سے**) ہمیں اسے برآمد بھی کرنا ہوگا، جیسے یہ:
+In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this:

```typescript
export { handleNewGravatar }
```

-### استعمال
+### Usage

ایک بار جب یہ سب سیٹ ہو جائے تو، ٹیسٹ کوریج ٹول کو چلانے کے لیے، بس چلائیں:

```sh
graph test -- -c
```

-آپ اپنی `package.json` فائل میں حسب ضرورت `کوریج` کمانڈ بھی شامل کرسکتے ہیں، جیسے:
+You could also add a custom `coverage` command to your `package.json` file, like so:

```typescript
 "scripts": {
    /.../
    "coverage": "graph test -- -c"
  },
```

> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined

This means you have used `console.log` in your code, which is not supported by AssemblyScript.

> ERROR TS2554: Expected ? arguments, but got ?.
>
> in ~lib/matchstick-as/assembly/defaults.ts(24,12)

-دلائل میں عدم مماثلت `graph-ts` اور `matchstick-as` میں عدم مماثلت کی وجہ سے ہوتی ہے۔ اس طرح کے مسائل کو حل کرنے کا بہترین طریقہ یہ ہے کہ ہر چیز کو تازہ ترین جاری کردہ ورژن میں اپ ڈیٹ کیا جائے.
+The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version.

## اضافی وسائل

For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_).

From f340064f992899d66856a3b18eb331559e716341 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:47:19 -0500
Subject: [PATCH 0213/1534] New translations unit-testing-framework.mdx (Vietnamese)

---
 .../creating/unit-testing-framework.mdx | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/website/src/pages/vi/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/vi/subgraphs/developing/creating/unit-testing-framework.mdx
index 3867eaebfeee..10a1078a2eb5 100644
--- a/website/src/pages/vi/subgraphs/developing/creating/unit-testing-framework.mdx
+++ b/website/src/pages/vi/subgraphs/developing/creating/unit-testing-framework.mdx
@@ -2,7 +2,7 @@
title: Unit Testing Framework
---

-Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs.
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs.
## Benefits of Using Matchstick

@@ -328,10 +328,10 @@ import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/ind
import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"

describe("handleNewGravatar", () => {
-  afterAll(() => {
+  afterAll(() => {
    store.remove("Gravatar", "0x1")
    ...
-  })
+  })

  test("It creates a new entity with Id 0x0", () => {
    ...

@@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => {
    gravatar.save()
  })

-  test('Upates the displayName', () => {
+  test('Updates the displayName', () => {
    assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar')

    // code that should update the displayName to 1st Gravatar

@@ -441,7 +441,7 @@ describe("handleNewGravatar", () => {
})

describe("handleUpdatedGravatar", () => {
-  test("Upates the displayName", () => {
+  test("Updates the displayName", () => {
    assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar")

    // code that should update the displayName to 1st Gravatar

@@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => {
    store.remove("Gravatar", "0x0")
  })

-  test("Upates the displayName", () => {
+  test("Updates the displayName", () => {
    assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar")

    // code that should update the displayName to 1st Gravatar

@@ -603,7 +603,7 @@ export function createNewGravatarEvent(
  newGravatarEvent.parameters = new Array()
  let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id))
  let addressParam = new ethereum.EventParam(
-    'ownderAddress',
+    'ownerAddress',
    ethereum.Value.fromAddress(Address.fromString(ownerAddress)),
  )
  let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName))

@@ -1160,7 +1160,7 @@ test('ethereum/contract dataSource creation example', () => {

#### Testing `file/ipfs` templates

-Similarly to contract dynamic data sources, users can test test file datas sources and their handlers
+Similarly to contract dynamic data sources, users can test file data sources and their handlers

##### Example `subgraph.yaml`

@@ -1293,7 +1293,7 @@ Using **Matchstick**, subgraph developers are able to run a script that will cal

The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked.
-### Điều kiện tiên quyết
+### Prerequisites

To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand:

#### Export your handlers

In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported:

```typescript
import { handleNewGravatar } from '../../src/gravity'
```

In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this:

```typescript
export { handleNewGravatar }
```

-### Sử dụng
+### Usage

Once that's all set up, to run the test coverage tool, simply run:

```sh
graph test -- -c
```

You could also add a custom `coverage` command to your `package.json` file, like so:

```typescript
 "scripts": {
    /.../
    "coverage": "graph test -- -c"
  },
```

From 216327aae8ec04b622179c0efe8357a0b16bb488 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:47:20 -0500
Subject: [PATCH 0214/1534] New translations unit-testing-framework.mdx (Marathi)

---
 .../creating/unit-testing-framework.mdx | 172 +++++++++---------
 1 file changed, 86 insertions(+), 86 deletions(-)

diff --git a/website/src/pages/mr/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/mr/subgraphs/developing/creating/unit-testing-framework.mdx
index 8cb83f7735ae..e09a384b8e6d 100644
--- a/website/src/pages/mr/subgraphs/developing/creating/unit-testing-framework.mdx
+++ b/website/src/pages/mr/subgraphs/developing/creating/unit-testing-framework.mdx
@@ -2,7 +2,7 @@
title: युनिट चाचणी फ्रेमवर्क
---

-Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs.
+Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs.

## Benefits of Using Matchstick

Installation command:

```sh
brew install postgresql
```

-नवीनतम libpq.5.lib साठी एक सिमलिंक तयार करा _तुम्हाला प्रथम हे dir तयार करावे लागेल_ `/usr/local/opt/postgresql/lib/`
+Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/`

```sh
ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib
```

static BYTES = Symbol("Bytes") SyntaxError: Unexpected token =
/node_modules/gluegun/build/index.js:13 throw up;

-कृपया खात्री करा की तुम्ही Node.js graph-cli च्या नवीन आवृत्तीवर आहात आता **v10.19.0** ला सपोर्ट करत नाही आणि WSL वरील नवीन Ubuntu प्रतिमांसाठी ती डीफॉल्ट आवृत्ती आहे. उदाहरणार्थ मॅचस्टिक **v18.1.0** सह WSL वर काम करत असल्याची पुष्टी झाली आहे, तुम्ही त्यावर **nvm** द्वारे किंवा तुम्ही तुमचे ग्लोबल Node.js अपडेट केल्यास त्यावर स्विच करू शकता. तुम्हाला नोडज अपडेट केल्यानंतर `node_modules` हटवायला आणि `npm install` पुन्हा चालवायला विसरू नका! त्यानंतर, तुमच्याकडे **libpq** स्थापित असल्याची खात्री करा, तुम्ही ते चालवून करू शकता
+Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating your Node.js! Then, make sure you have **libpq** installed, you can do that by running

```
sudo apt-get install libpq-dev
```

-आणि शेवटी, `ग्राफ चाचणी` वापरू नका (जे तुमचे ग्राफ-क्लीचे जागतिक इंस्टॉलेशन वापरते आणि काही कारणास्तव ते सध्या WSL वर तुटलेले दिसते), त्याऐवजी `यार्न टेस्ट` वापरा. किंवा `npm रन चाचणी` (जे ग्राफ-क्लीचे स्थानिक, प्रकल्प-स्तरीय उदाहरण वापरेल, जे मोहिनीसारखे कार्य करते). त्यासाठी तुम्हाला तुमच्या `package.json` फाईलमध्ये अर्थातच एक `"test"` स्क्रिप्ट असणे आवश्यक आहे जे सोपे असू शकते
+And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as

```json
{
  "name": "demo-subgraph",
  "version": "0.1.0",
  "scripts": {
    "test": "graph test",
    ...
  },
  ...
}
```

### Using Matchstick

-तुमच्या सबग्राफ प्रोजेक्टमध्ये **Matchstick** वापरण्यासाठी फक्त एक टर्मिनल उघडा, तुमच्या प्रोजेक्टच्या रूट फोल्डरवर नेव्हिगेट करा आणि फक्त `ग्राफ चाचणी [options] ` चालवा - ते नवीनतम **Matchstick** बायनरी डाउनलोड करते आणि चाचणी फोल्डरमध्ये निर्दिष्ट चाचणी किंवा सर्व चाचण्या चालवते (किंवा कोणताही डेटासोर्स ध्वज निर्दिष्ट केलेला नसल्यास सर्व विद्यमान चाचण्या).
+To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified).

### CLI पर्याय

```sh
graph test gravity
```

```sh
graph test path/to/file.test.ts
```

-**पर्याय:**
+**Options:**

```sh
-c, --coverage                Run the tests in coverage mode
-d, --docker                  Run the tests in a docker container (Note: Please execute from the root folder of the subgraph)
-f, --force                   Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image.
-h, --help                    Show usage information
-l, --logs                    Logs to the console information about the OS, CPU model and download url (debugging purposes)
-r, --recompile               Forces tests to be recompiled
-v, --version <tag>           Choose the version of the rust binary that you want to be downloaded/used
```

### डॉकर

-`graph-cli 0.25.2` वरून, `ग्राफ चाचणी` कमांड `-d` ध्वजासह डॉकर कंटेनरमध्ये `matchstick` चालवण्यास समर्थन देते. डॉकर अंमलबजावणी [बाइंड माउंट](https://docs.docker.com/storage/bind-mounts/) वापरते त्यामुळे प्रत्येक वेळी `ग्राफ test -d` कमांड कार्यान्वित केल्यावर डॉकर प्रतिमा पुन्हा तयार करावी लागत नाही. वैकल्पिकरित्या तुम्ही डॉकर मॅन्युअली चालवण्यासाठी [matchstick](https://github.com/LimeChain/matchstick#docker-) रेपॉजिटरीमधील सूचनांचे अनुसरण करू शकता.
+From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually.

❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI).
-❗ जर तुम्ही यापूर्वी `ग्राफ चाचणी` केली असेल तर तुम्हाला डॉकर बिल्ड दरम्यान खालील त्रुटी येऊ शकते: +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh - प्रेषकाकडून त्रुटी: xattr node_modules/binary-install-raw/bin/binary करण्यात अयशस्वी-: परवानगी नाकारली + error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -या प्रकरणात रूट फोल्डरमध्ये `.dockerignore` तयार करा आणि `node_modules/binary-install-raw/bin` जोडा +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### कॉन्फिगरेशन -मॅचस्टिक `matchstick.yaml` कॉन्फिगर फाइलद्वारे सानुकूल चाचण्या, libs आणि मॅनिफेस्ट पथ वापरण्यासाठी कॉन्फिगर केले जाऊ शकते: +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,11 +147,11 @@ manifestPath: path/to/subgraph.yaml ### डेमो सबग्राफ -तुम्ही [डेमो सबग्राफ रेपो](https://github.com/LimeChain/demo-subgraph) क्लोन करून या मार्गदर्शकातील उदाहरणे वापरून पाहू शकता आणि खेळू शकता +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### व्हिडिओ ट्यूटोरियल -तसेच तुम्ही ["तुमच्या सबग्राफसाठी युनिट चाचण्या लिहिण्यासाठी मॅचस्टिक कसे वापरावे"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) वर व्हिडिओ मालिका पाहू शकता +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -161,9 +161,9 @@ _**IMPORTANT: The test structure described below depens on `matchstick-as` versi `describe(name: String , () => {})` - Defines a test group. -**_नोट्स:_** +**_Notes:_** -- _वर्णने अनिवार्य नाहीत. तुम्ही describe() ब्लॉक्सच्या बाहेर test() जुन्या पद्धतीने अजूनही वापरू शकता_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ उदाहरण: @@ -178,7 +178,7 @@ describe("handleNewGravatar()", () => { }) ``` -नेस्टेड `describe()` उदाहरण: +Nested `describe()` example: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -232,11 +232,11 @@ test("handleNewGravatar() should create a new entity", () => { ### आधी सर्व() -फाइलमधील कोणत्याही चाचण्यांपूर्वी कोड ब्लॉक चालवते. `describe` ब्लॉकच्या आत `before All` घोषित केले असल्यास, ते त्या `describe` ब्लॉकच्या सुरुवातीला चालते. +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. उदाहरणे: -`आधी सर्व` मध्ये कोड फाइलमधील _सर्व_ चाचण्यांपूर्वी एकदा कार्यान्वित होईल. +Code inside `beforeAll` will execute once before _all_ tests in the file. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -263,7 +263,7 @@ describe("When entity already exists", () => { }) ``` -`foreAll` मधील कोड पहिल्या वर्णन ब्लॉकमधील सर्व चाचण्यांपूर्वी एकदा कार्यान्वित होईल +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -292,11 +292,11 @@ describe("handleUpdatedGravatar()", () => { ### शेवटी() -फाइलमधील सर्व चाचण्यांनंतर कोड ब्लॉक चालवते. `वर्णन` ब्लॉकच्या आत `आफ्टरऑल` घोषित केले असल्यास, ते त्या `वर्णन` ब्लॉकच्या शेवटी चालते. 
+Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. उदाहरण: -`आफ्टरऑल` मध्ये कोड फाइलमधील _सर्व_ चाचण्यांनंतर एकदा कार्यान्वित होईल. +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -321,17 +321,17 @@ describe("handleUpdatedGravatar", () => { }) ``` -`afterAll` मधील कोड पहिल्या वर्णन ब्लॉकमधील सर्व चाचण्यांनंतर एकदाच कार्यान्वित होईल +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -353,9 +353,9 @@ describe("handleUpdatedGravatar", () => { ### प्रत्येकाच्या आधी() -प्रत्येक चाचणीपूर्वी कोड ब्लॉक चालवते. `describe` ब्लॉकच्या आत `beforeEach` घोषित केले असल्यास, ते त्या `describe` ब्लॉकमधील प्रत्येक चाचणीपूर्वी चालते. +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -उदाहरणे: प्रत्येक चाचण्यांपूर्वी `beforeEach` मधील कोड कार्यान्वित होईल. +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -378,7 +378,7 @@ describe("handleNewGravatars, () => { ... ``` -`beforeEach` मधील कोड वर्णन केलेल्या प्रत्येक चाचणीपूर्वीच कार्यान्वित होईल +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,7 +392,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -416,11 +416,11 @@ describe('handleUpdatedGravatars', () => { ### प्रत्येक नंतर() -प्रत्येक चाचणीनंतर कोड ब्लॉक चालवते. `describe` ब्लॉकमध्ये `afterEach` घोषित केले असल्यास, ते त्या `describe` ब्लॉकमधील प्रत्येक चाचणीनंतर चालते. +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. उदाहरणे: -प्रत्येक चाचणीनंतर `afterEach` मधील कोड कार्यान्वित होईल. +Code inside `afterEach` will execute after every test. 
```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,7 +441,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -459,7 +459,7 @@ describe("handleUpdatedGravatar", () => { }) ``` -`afterEach` मधील कोड त्या वर्णनातील प्रत्येक चाचणीनंतर कार्यान्वित होईल +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -565,7 +565,7 @@ assert.dataSourceExists( ## एक युनिट चाचणी लिहा -[डेमो सबग्राफ](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts) मधील Gravatar उदाहरणे वापरून एक साधी युनिट चाचणी कशी दिसते ते पाहू. +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). असे गृहीत धरून की आमच्याकडे खालील हँडलर फंक्शन आहे (आपले जीवन सोपे करण्यासाठी दोन मदतनीस कार्यांसह): @@ -603,7 +603,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -652,23 +652,23 @@ test('Next test', () => { }) ``` -अनपॅक करण्यासाठी ते खूप आहे! सर्वप्रथम, लक्षात घेण्यासारखी महत्त्वाची गोष्ट म्हणजे आम्ही आमच्या असेंबलीस्क्रिप्ट हेल्पर लायब्ररी (npm मॉड्यूल म्हणून वितरित) `matchstick-as` मधून गोष्टी आयात करत आहोत. तुम्ही [येथे](https://github.com/LimeChain/matchstick-as) भांडार शोधू शकता. `matchstick-as` आम्हाला उपयुक्त चाचणी पद्धती प्रदान करते आणि `test()` फंक्शन देखील परिभाषित करते जे आम्ही आमचे चाचणी ब्लॉक तयार करण्यासाठी वापरू. बाकीचे अगदी सरळ आहे - काय होते ते येथे आहे: +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: - आम्ही आमची प्रारंभिक स्थिती सेट करत आहोत आणि एक कस्टम Gravatar अस्तित्व जोडत आहोत; -- आम्ही `createNewGravatarEvent()` फंक्शन वापरून दोन `NewGravatar` इव्हेंट ऑब्जेक्ट्स त्यांच्या डेटासह परिभाषित करतो; -- आम्ही त्या इव्हेंटसाठी हँडलर पद्धती कॉल करत आहोत - `handleNewGravatars()` आणि आमच्या सानुकूल इव्हेंटच्या सूचीमध्ये पास करत आहोत; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; - आम्ही स्टोअरच्या स्थितीवर ठाम आहोत. 
ते कसे कार्य करते? - आम्ही अस्तित्व प्रकार आणि आयडीचे एक अद्वितीय संयोजन पास करत आहोत. मग आम्ही त्या घटकावरील विशिष्ट फील्ड तपासतो आणि असे प्रतिपादन करतो की तिच्याकडे अपेक्षित मूल्य आहे. आम्ही स्टोअरमध्ये जोडलेल्या सुरुवातीच्या Gravatar एंटिटीसाठी तसेच हँडलर फंक्शन कॉल केल्यावर जोडल्या जाणार्‍या दोन Gravatar घटकांसाठी आम्ही हे करत आहोत;
-आणि शेवटी - आम्ही `clearStore()` वापरून स्टोअर साफ करत आहोत जेणेकरून आमची पुढील चाचणी नवीन आणि रिकाम्या स्टोअर ऑब्जेक्टसह सुरू होईल. आम्हाला पाहिजे तितके चाचणी ब्लॉक्स आम्ही परिभाषित करू शकतो.
+And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want.

आम्ही तिथे जातो - आम्ही आमची पहिली चाचणी तयार केली आहे! 👏

आता आमच्या चाचण्या चालवण्यासाठी तुम्हाला तुमच्या सबग्राफ रूट फोल्डरमध्ये खालील गोष्टी चालवाव्या लागतील:

-`आलेख चाचणी गुरुत्वाकर्षण`
+`graph test Gravity`

आणि जर सर्व काही ठीक झाले तर तुम्हाला पुढील गोष्टींसह स्वागत केले पाहिजे:

-![“सर्व चाचण्या पास झाल्या!” म्हणणारी मॅचस्टिक](/img/matchstick-tests-passed.png)
+![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png)

## सामान्य चाचणी परिस्थिती

@@ -754,11 +756,11 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri

### IPFS फाइल्सची थट्टा उडवणारी (माचस्टिक ०. ४. १ वरून)

-वापरकर्ते `mockIpfsFile(hash, filePath)` फंक्शन वापरून IPFS फाइल्सची थट्टा करू शकतात. फंक्शन दोन आर्ग्युमेंट्स स्वीकारते, पहिला आयपीएफएस फाइल हॅश/पथ आणि दुसरा स्थानिक फाइलचा मार्ग आहे.
+Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file.

-टीप: `ipfs.map/ipfs.mapJSON` ची चाचणी करताना, मॅचस्टॅकने `processGravatar()` फंक्शन सारखे शोधण्यासाठी चाचणी फाइलमधून कॉलबॅक फंक्शन एक्सपोर्ट केले पाहिजे. खालील चाचणी उदाहरणामध्ये:
+NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstick to detect it, like the `processGravatar()` function in the test example below:

-`.test.ts` फाइल:
+`.test.ts` file:

```typescript
import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index'
@@ -795,7 +797,7 @@ test('ipfs.map', () => {
})
```

-`utils.ts` फाइल:
+`utils.ts` file:

```typescript
import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts"
@@ -857,11 +859,11 @@ gravatar.save()
assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0')
```

-Assert.fieldEquals() फंक्शन चालवल्याने दिलेल्या फील्डची दिलेल्या अपेक्षित मूल्याविरुद्ध समानता तपासली जाईल. चाचणी अयशस्वी होईल आणि मूल्ये **नाही** समान असल्यास एक त्रुटी संदेश आउटपुट केला जाईल. अन्यथा चाचणी यशस्वीरित्या उत्तीर्ण होईल.
+Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be output if the values are **NOT** equal. Otherwise the test will pass successfully.

### इव्हेंट मेटाडेटासह संवाद साधत आहे

-वापरकर्ते डीफॉल्ट व्यवहार मेटाडेटा वापरू शकतात, जो इथरियम म्हणून परत केला जाऊ शकतो. `newMockEvent()` फंक्शन वापरून इव्हेंट. खालील उदाहरण इव्हेंट ऑब्जेक्टवरील त्या फील्डवर तुम्ही कसे वाचू/लिहू शकता हे दाखवते:
+Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. 
The following example shows how you can read/write to those fields on the Event object:

```typescript
// Read
@@ -878,7 +880,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)
assert.equals(ethereum.Value.fromString("hello"), ethereum.Value.fromString("hello"));
```

-### एखादी संस्था स्टोअरमध्ये **नाही** असल्याचे ठासून सांगणे
+### Asserting that an Entity is **not** in the store

वापरकर्ते असे ठामपणे सांगू शकतात की स्टोअरमध्ये अस्तित्व नाही. फंक्शन एक अस्तित्व प्रकार आणि आयडी घेते. वस्तुस्थिती स्टोअरमध्ये असल्यास, चाचणी संबंधित त्रुटी संदेशासह अयशस्वी होईल. ही कार्यक्षमता कशी वापरायची याचे एक द्रुत उदाहरण येथे आहे:

@@ -1040,7 +1042,7 @@ describe('loadInBlock', () => {

### डायनॅमिक डेटा स्रोतांची चाचणी

-डायनॅमिक डेटा स्रोतांची चाचणी `context()`, `address()` आणि `network()` फंक्शन्सच्या रिटर्न व्हॅल्यूची थट्टा करून केली जाऊ शकते. डेटास्रोत नेमस्पेस. ही फंक्शन्स सध्या खालील गोष्टी परत करतात: `संदर्भ()` - रिक्त अस्तित्व (डेटास्रोत संदर्भ), `पत्ता()` परत करते - `0x00000000000000000000000000000000000000`, `नेटवर्क() - ` `मेननेट` परत करतो.`create(...)` आणि `createWithContext(...)` फंक्शन्सची थट्टा केली जाते की काहीही करू नये म्हणून त्यांना चाचणीमध्ये बोलावण्याची गरज नाही. रिटर्न व्हॅल्यूजमधील बदल `matchstick-as` मधील `dataSourceMock` नेमस्पेसच्या फंक्शन्सद्वारे केले जाऊ शकतात (आवृत्ती 0.3.0+).
+Testing dynamic data sources can be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+).

खालील उदाहरण:

@@ -1160,7 +1162,7 @@ test('ethereum/contract dataSource creation example', () => {

#### Testing `file/ipfs` templates

Similarly to contract dynamic data sources, users can test file data sources and their handlers

##### Example `subgraph.yaml`

@@ -1293,25 +1295,25 @@ Using **Matchstick**, subgraph developers are able to run a script that will cal

The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as a whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead, we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked.

-### पूर्वतयारी
+### Prerequisites

-**Matchstick** मध्ये प्रदान केलेली चाचणी कव्हरेज कार्यक्षमता चालवण्यासाठी, तुम्हाला काही गोष्टी आधीच तयार करणे आवश्यक आहे:
+To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand:

#### तुमचे हँडलर निर्यात करा

-कोणते हँडलर चालवले जात आहेत हे तपासण्यासाठी **मॅचस्टिक** साठी, त्या हँडलरना **चाचणी फाइल** मधून निर्यात करणे आवश्यक आहे. 
उदाहरणार्थ, आमच्या उदाहरणात, आमच्या gravity.test.ts फाइलमध्ये आमच्याकडे खालील हँडलर आयात केले जात आहे: +In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: ```typescript '../../src/gravity' वरून { handleNewGravatar } आयात करा ``` -ते कार्य दृश्यमान होण्यासाठी (ते `wat` फाइल **नावाने** मध्ये समाविष्ट करण्यासाठी) आम्हाला ते निर्यात देखील करावे लागेल, जसे: +In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: ```typescript निर्यात { handleNewGravatar } ``` -### वापर +### Usage एकदा ते सर्व सेट झाल्यानंतर, चाचणी कव्हरेज साधन चालविण्यासाठी, फक्त चालवा: @@ -1319,7 +1319,7 @@ The test coverage tool takes the compiled test `wasm` binaries and converts them आलेख चाचणी-- -c ``` -तुम्ही तुमच्या `package.json` फाईलमध्ये सानुकूल `कव्हरेज` कमांड देखील जोडू शकता, जसे की: +You could also add a custom `coverage` command to your `package.json` file, like so: ```typescript "scripts": { @@ -1331,47 +1331,47 @@ The test coverage tool takes the compiled test `wasm` binaries and converts them That will execute the coverage tool and you should see something like this in the terminal: ```sh -$ आलेख चाचणी -c -डाउनलोड/इंस्टॉल चरण वगळणे कारण /Users/petko/work/demo-subgraph/node_modules/binary-install-raw/bin/0.4.0 वर बायनरी आधीपासूनच अस्तित्वात आहे +$ graph test -c +Skipping download/install step because binary already exists at /Users/petko/work/demo-subgraph/node_modules/binary-install-raw/bin/0.4.0 -___ ___ _ _ _ _ _ -| \/ | | | | | | | (_) | | -| . . | __ _| |_ ___| |__ ___| |_ _ ___| | __ -| |\/| |/ _` | __/ __| '_ \/ __| __| |/ __| |// -| | | | (_| | || (__| | | \__ \ |_| | (__| < -\_| |_/\__,__|\__\___|__| |_|___/\__|_|\___|_|\_\ +___ ___ _ _ _ _ _ +| \/ | | | | | | | (_) | | +| . . | __ _| |_ ___| |__ ___| |_ _ ___| | __ +| |\/| |/ _` | __/ __| '_ \/ __| __| |/ __| |/ / +| | | | (_| | || (__| | | \__ \ |_| | (__| < +\_| |_/\__,_|\__\___|_| |_|___/\__|_|\___|_|\_\ -संकलित करत आहे... +Compiling... -कव्हरेज रिपोर्ट मोडमध्ये चालत आहे. +Running in coverage report mode. ️ -व्युत्पन्न चाचणी मॉड्यूल्स वाचत आहे... 🔎️ +Reading generated test modules... 🔎️ -कव्हरेज अहवाल व्युत्पन्न करत आहे 📝 +Generating coverage report 📝 -'ग्रॅव्हिटी' स्त्रोतासाठी हँडलर: -हँडलर 'हँडलन्यूग्रावतार' चाचणी केली जाते. -हँडलर 'handleUpdatedGravatar' ची चाचणी केलेली नाही. -हँडलर 'handleCreateGravatar' चाचणी केली आहे. -चाचणी कव्हरेज: 66.7% (2/3 हँडलर). +Handlers for source 'Gravity': +Handler 'handleNewGravatar' is tested. +Handler 'handleUpdatedGravatar' is not tested. +Handler 'handleCreateGravatar' is tested. +Test coverage: 66.7% (2/3 handlers). -'GraphTokenLockWallet' स्त्रोतासाठी हँडलर: -हँडलर 'हँडलटोकन्सरिलीझ्ड' ची चाचणी केलेली नाही. -हँडलर 'HandleTokensWithdrawn' ची चाचणी केली जात नाही. -हँडलर 'HandleTokensRevoked' ची चाचणी केलेली नाही. -हँडलर 'handleManagerUpdated' चाचणी केलेली नाही. -हँडलर 'handleApproveTokenDestinations' ची चाचणी केलेली नाही. -हँडलर 'handleRevokeTokenDestinations' ची चाचणी केलेली नाही. -चाचणी कव्हरेज: 0.0% (0/6 हँडलर). +Handlers for source 'GraphTokenLockWallet': +Handler 'handleTokensReleased' is not tested. +Handler 'handleTokensWithdrawn' is not tested. +Handler 'handleTokensRevoked' is not tested. +Handler 'handleManagerUpdated' is not tested. +Handler 'handleApproveTokenDestinations' is not tested. +Handler 'handleRevokeTokenDestinations' is not tested. 
+Test coverage: 0.0% (0/6 handlers). -जागतिक चाचणी कव्हरेज: 22.2% (2/9 हँडलर). +Global test coverage: 22.2% (2/9 handlers). ``` ### लॉग आउटपुटमध्ये चाचणी रन टाइम कालावधी लॉग आउटपुटमध्ये चाचणी रन कालावधी समाविष्ट आहे. येथे एक उदाहरण आहे: -`[गुरु, 31 मार्च 2022 13:54:54 +0300] प्रोग्राम अंमलात आणला: 42.270ms.` +`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` ## सामान्य कंपाइलर त्रुटी @@ -1385,15 +1385,15 @@ This means you have used `console.log` in your code, which is not supported by A > > in ~lib/matchstick-as/assembly/defaults.ts(18,12) > -> त्रुटी TS2554: अपेक्षित आहे? युक्तिवाद, पण मिळाले?. +> ERROR TS2554: Expected ? arguments, but got ?. > > नवीन ethereum.Transaction परत करा(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt); > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -`graph-ts` आणि `matchstick-as` मधील जुळत नसल्यामुळे वितर्कांमधील जुळत नाही. यासारख्या समस्यांचे निराकरण करण्याचा सर्वोत्तम मार्ग म्हणजे नवीनतम रिलीझ केलेल्या आवृत्तीवर सर्वकाही अद्यतनित करणे. +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. -## Additional Resources +## अतिरिक्त संसाधने For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). From 031001149fc7d6225d6f9a1094b844aa0dfc45f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:21 -0500 Subject: [PATCH 0215/1534] New translations unit-testing-framework.mdx (Hindi) --- .../creating/unit-testing-framework.mdx | 148 +++++++++--------- 1 file changed, 75 insertions(+), 73 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/unit-testing-framework.mdx b/website/src/pages/hi/subgraphs/developing/creating/unit-testing-framework.mdx index 7d369ebf7700..483a385a5362 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/unit-testing-framework.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/unit-testing-framework.mdx @@ -1,39 +1,41 @@ --- -title: "Unit परीक्षण फ्रेमवर्क प्राप्त करना\nकला" +title: |- + Unit परीक्षण फ्रेमवर्क प्राप्त करना + कला --- -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. +Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and successfully deploy their subgraphs. ## Benefits of Using Matchstick -- It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. 
+- यह Rust में लिखा गया है और उच्च प्रदर्शन के लिए अनुकूलित है। +- यह आपको डेवलपर विशेषता तक पहुंच प्रदान करता है, जिसमें contract कॉल्स को मॉक करने, स्टोर स्टेट के बारे में एसेर्शन करने, सबग्राफ विफलताओं की निगरानी करने, टेस्ट परफॉर्मेंस जांचने और बहुत कुछ करने की क्षमता शामिल है। ## शुरू करना -### Install Dependencies +### डिपेंडेंसीज़ इंस्टॉल करें -In order to use the test helper methods and run tests, you need to install the following dependencies: +टेस्ट हेल्पर मेथड का उपयोग करने और टेस्ट चलाने के लिए, आपको निम्नलिखित डिपेंडेंसीज़ इंस्टॉल करनी होंगी: ```sh yarn add --dev matchstick-as ``` -### Install PostgreSQL +### PostgreSQL स्थापित करें -`graph-node` depends on PostgreSQL, so if you don't already have it, then you will need to install it. +`ग्राफ-नोड` PostgreSQL पर निर्भर करता है, इसलिए यदि यह पहले से आपके पास नहीं है, तो आपको इसे इंस्टॉल करने की आवश्यकता होगी। -> Note: It's highly recommended to use the commands below to avoid unexpected errors. +> नोट: अनपेक्षित त्रुटियों से बचने के लिए नीचे दिए गए कमांड्स का उपयोग करना अत्यधिक अनुशंसित है। -#### Using MacOS +#### MacOS का उपयोग करना -Installation command: +स्थापना आदेश: ```sh brew install postgresql ``` -नवीनतम libpq.5.lib के लिए एक symlink बनाएं _आपको पहले यह dir बनाने की आवश्यकता हो सकती है_ `/usr/local/opt/postgresql/lib/` +यहां तक कि नवीनतम libpq.5.lib_ का एक symlink बनाएं। आपको पहले यह dir बनाने की आवश्यकता हो सकती है: `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -41,13 +43,13 @@ ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/o #### Using Linux -Installation command (depends on your distro): +इंस्टॉलेशन कमांड (आपके डिस्ट्रीब्यूशन पर निर्भर करता है): ```sh sudo apt install postgresql ``` -### Using WSL (Windows Subsystem for Linux) +### WSL (Windows Subsystem for Linux) का उपयोग करते हुए कृपया ध्यान दें, आर्बिट्रम माइग्रेशन पूरा होने पर इसमें सुधार होगा, जिससे नेटवर्क पर भाग लेने के लिए गैस की लागत काफी कम हो जाएगी।... @@ -61,13 +63,13 @@ static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = /node_modules/gluegun/build/index.js:13 throw up; ``` -कृपया सुनिश्चित करें कि आप Node.js के नए संस्करण पर हैं, ग्राफ-क्ली अब **v10.19.0** का समर्थन नहीं करता है, और यह अभी भी WSL पर नई Ubuntu छवियों के लिए डिफ़ॉल्ट संस्करण है। उदाहरण के लिए मैचस्टिक के WSL पर **v18.1.0** के साथ काम करने की पुष्टि हो गई है, आप इसे या तो **nvm** के माध्यम से बदल सकते हैं या यदि आप अपने वैश्विक Node.js को अपडेट करते हैं। अपने नोडज को अपडेट करने के बाद `node_modules` को हटाना और `npm install` फिर से चलाना न भूलें! 
फिर, सुनिश्चित करें कि आपके पास **libpq** स्थापित है, आप इसे चलाकर कर सकते हैं +कृपया सुनिश्चित करें कि आप नोड.js के नए वर्जन पर हैं, क्योंकि** v10.19.0** अब graph-cli द्वारा समर्थित नहीं है, और यह अभी भी WSL पर नए Ubuntu इमेज के लिए डिफ़ॉल्ट वर्जन है। उदाहरण के लिए,instances Matchstick की पुष्टि हो चुकी है कि यह WSL पर **v18.1.0** के साथ काम कर रहा है। आप इसे **nvm** के माध्यम से या अपने ग्लोबल नोड.js को अपडेट करके स्विच कर सकते हैं।अपडेट के बाद, `नोड_modules` को डिलीट करना न भूलें और \`npm install दोबारा चलाएँ। फिर, सुनिश्चित करें कि आपके पास **libpq** इंस्टॉल है। आप इसे निम्नलिखित कमांड चलाकर कर सकते हैं: ``` sudo apt-get install libpq-dev ``` -और अंत में, `ग्राफ़ परीक्षण` का उपयोग न करें (जो आपके ग्राफ़-क्ली की वैश्विक स्थापना का उपयोग करता है और किसी कारण से ऐसा लगता है कि यह वर्तमान में WSL पर टूटा हुआ है), इसके बजाय `यार्न परीक्षण` का उपयोग करें या `npm रन टेस्ट` (जो ग्राफ-क्ली के स्थानीय, प्रोजेक्ट-स्तरीय उदाहरण का उपयोग करेगा, जो एक आकर्षण की तरह काम करता है)। उसके लिए आपको अपनी `package.json` फ़ाइल में एक `"test"` स्क्रिप्ट की आवश्यकता होगी जो कुछ सरल हो सकती है +And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as ```json { @@ -87,7 +89,7 @@ sudo apt-get install libpq-dev ### Using Matchstick -अपने सबग्राफ प्रोजेक्ट में **माचिस** का उपयोग करने के लिए बस एक टर्मिनल खोलें, अपने प्रोजेक्ट के रूट फ़ोल्डर में नेविगेट करें और बस `ग्राफ़ टेस्ट [विकल्प] <डेटासोर्स> ` - यह नवीनतम **मैचस्टिक** बाइनरी को डाउनलोड करता है और एक परीक्षण फ़ोल्डर में निर्दिष्ट परीक्षण या सभी परीक्षण चलाता है (या सभी मौजूदा परीक्षण यदि कोई डेटा स्रोत ध्वज निर्दिष्ट नहीं है)। +To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). ### सीएलआई विकल्प @@ -109,7 +111,7 @@ This will run only that specific test file: graph test path/to/file.test.ts ``` -**विकल्प:** +**Options:** ```sh -c, --coverage Run the tests in coverage mode @@ -123,21 +125,21 @@ graph test path/to/file.test.ts ### Docker -`graph-cli 0.25.2` से, `graph test` कमांड `माचिस` को `-d झंडा। डॉकर कार्यान्वयन बाइंड माउंट का उपयोग करता है, इसलिए इसे हर बार ग्राफ में डॉकर छवि को फिर से बनाने की आवश्यकता नहीं होती है परीक्षण -d ` आदेश निष्पादित किया गया है। वैकल्पिक रूप से आप डॉकर को मैन्युअल रूप से चलाने के लिए [matchstick](https://github.com/LimeChain/matchstick#docker-) रिपॉजिटरी के निर्देशों का पालन कर सकते हैं। +From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. ❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). 
-❗ यदि आपने पहले `graph test` चलाया है, तो आपको डॉकर बिल्ड के दौरान निम्न त्रुटि का सामना करना पड़ सकता है: +❗ If you have previously ran `graph test` you may encounter the following error during docker build: ```sh - प्रेषक से त्रुटि: xattr node_modules/बाइनरी-इंस्टॉल-रॉ/बिन/बाइनरी-<प्लेटफ़ॉर्म> में विफल: अनुमति अस्वीकृत + error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -इस मामले में रूट फ़ोल्डर में एक `.dockerignore` बनाएं और `node_modules/binary-install-raw/bin` जोड़ें +In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` ### विन्यास -माचिस की तीलियों को `matchstick.yaml` कॉन्फ़िग फ़ाइल के माध्यम से एक कस्टम परीक्षण, libs और मैनिफ़ेस्ट पथ का उपयोग करने के लिए कॉन्फ़िगर किया जा सकता है: +Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: ```yaml testsFolder: path/to/tests @@ -147,11 +149,11 @@ manifestPath: path/to/subgraph.yaml ### डेमो सबग्राफ -आप [डेमो सबग्राफ रेपो](https://github.com/LimeChain/demo-subgraph) की क्लोनिंग करके इस गाइड के उदाहरणों को आजमा सकते हैं और उनके साथ प्रयोग कर सकते हैं। +You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) ### वीडियो शिक्षण -साथ ही आप ["अपने सबग्राफ के लिए यूनिट टेस्ट लिखने के लिए माचिस का उपयोग कैसे करें"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) पर वीडियो श्रृंखला देख सकते हैं +Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) ## Tests structure @@ -159,11 +161,11 @@ _**IMPORTANT: The test structure described below depens on `matchstick-as` versi ### describe() -`describe(name: String , () => {})` - एक परीक्षण समूह को परिभाषित करता है। +`describe(name: String , () => {})` - Defines a test group. -**_टिप्पणियाँ:_** +**_Notes:_** -- _वर्णन अनिवार्य नहीं हैं। आप अभी भी test() पुराने तरीके का वर्णन() ब्लॉक के बाहर उपयोग कर सकते हैं_ +- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ उदाहरण: @@ -178,7 +180,7 @@ describe("handleNewGravatar()", () => { }) ``` -नेस्टेड `describe()` उदाहरण: +Nested `describe()` example: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -203,7 +205,7 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - एक टेस्ट केस को परिभाषित करता है। आप परीक्षण () का उपयोग वर्णन () ब्लॉक के अंदर या स्वतंत्र रूप से कर सकते हैं। +`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. उदाहरण: @@ -232,11 +234,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -फ़ाइल में किसी भी परीक्षण से पहले एक कोड ब्लॉक चलाता है। यदि `beforeAll` को `वर्णित करें` ब्लॉक के अंदर घोषित किया जाता है, तो यह उस `वर्णन` ब्लॉक की शुरुआत में चलता है। +Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. उदाहरण: -`beforeAll` के अंदर का कोड फ़ाइल में _सभी_ परीक्षणों से पहले एक बार निष्पादित होगा। +Code inside `beforeAll` will execute once before _all_ tests in the file. 
```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -263,7 +265,7 @@ describe("When entity already exists", () => { }) ``` -`beforeAll` के अंदर कोड पहले वर्णन ब्लॉक में सभी परीक्षणों से पहले एक बार निष्पादित होगा +Code inside `beforeAll` will execute once before all tests in the first describe block ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -292,11 +294,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -फ़ाइल में सभी परीक्षणों के बाद एक कोड ब्लॉक चलाता है। अगर `आफ्टरऑल` को `डिस्क्राइब` ब्लॉक के अंदर घोषित किया जाता है, तो यह उस `डिस्क्राइब` ब्लॉक के अंत में चलता है। +Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. उदाहरण: -`afterAll` के अंदर का कोड _सभी_ फ़ाइल में परीक्षण के बाद एक बार निष्पादित होगा। +Code inside `afterAll` will execute once after _all_ tests in the file. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -321,17 +323,17 @@ describe("handleUpdatedGravatar", () => { }) ``` -`afterAll` के अंदर कोड पहले वर्णन ब्लॉक में सभी परीक्षणों के बाद एक बार निष्पादित होगा +Code inside `afterAll` will execute once after all tests in the first describe block ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { - afterAll(() => { + afterAll(() => { store.remove("Gravatar", "0x1") ... - }) + }) test("It creates a new entity with Id 0x0", () => { ... @@ -353,9 +355,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -प्रत्येक परीक्षण से पहले एक कोड ब्लॉक चलाता है। अगर `beforeEach` को `decribe` ब्लॉक के अंदर घोषित किया जाता है, तो यह उस `describe` ब्लॉक में प्रत्येक टेस्ट से पहले चलता है। +Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. -उदाहरण: `beforeEach` के अंदर कोड प्रत्येक परीक्षण से पहले निष्पादित होगा। +Examples: Code inside `beforeEach` will execute before each tests. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -378,7 +380,7 @@ describe("handleNewGravatars, () => { ... ``` -`beforeEach` के अंदर का कोड वर्णन में प्रत्येक परीक्षण से पहले ही निष्पादित होगा +Code inside `beforeEach` will execute only before each test in the that describe ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -392,7 +394,7 @@ describe('handleUpdatedGravatars', () => { gravatar.save() }) - test('Upates the displayName', () => { + test('Updates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') // code that should update the displayName to 1st Gravatar @@ -416,11 +418,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -प्रत्येक परीक्षण के बाद एक कोड ब्लॉक चलाता है। यदि `के बाद` को `वर्णन` ब्लॉक के अंदर घोषित किया जाता है, तो यह उस `वर्णन` ब्लॉक में प्रत्येक परीक्षण के बाद चलता है। +Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. उदाहरण: -प्रत्येक परीक्षण के बाद `afterEach` के अंदर का कोड निष्पादित होगा। +Code inside `afterEach` will execute after every test. 
```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -441,7 +443,7 @@ describe("handleNewGravatar", () => { }) describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -459,7 +461,7 @@ describe("handleUpdatedGravatar", () => { }) ``` -उस वर्णन में प्रत्येक परीक्षण के बाद `afterEach` के अंदर का कोड निष्पादित होगा +Code inside `afterEach` will execute after each test in that describe ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -481,7 +483,7 @@ describe("handleUpdatedGravatar", () => { store.remove("Gravatar", "0x0") }) - test("Upates the displayName", () => { + test("Updates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") // code that should update the displayName to 1st Gravatar @@ -565,7 +567,7 @@ assert.dataSourceExists( ## यूनिट टेस्ट लिखें -[डेमो सबग्राफ](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts) में Gravatar उदाहरणों का उपयोग करके देखते हैं कि एक साधारण इकाई परीक्षण कैसा दिखेगा। +Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). मान लें कि हमारे पास निम्नलिखित हैंडलर फ़ंक्शन हैं (हमारे जीवन को आसान बनाने के लिए दो सहायक कार्यों के साथ): @@ -603,7 +605,7 @@ export function createNewGravatarEvent( newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( - 'ownderAddress', + 'ownerAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) @@ -652,23 +654,23 @@ test('Next test', () => { }) ``` -अनपैक करने के लिए यह बहुत कुछ है! सबसे पहले, नोटिस करने वाली एक महत्वपूर्ण बात यह है कि हम `matchstick-as` से चीजें आयात कर रहे हैं, हमारी असेंबली स्क्रिप्ट हेल्पर लाइब्रेरी (एनपीएम मॉड्यूल के रूप में वितरित)। आप रिपॉजिटरी [यहां](https://github.com/LimeChain/matchstick-as) पा सकते हैं। `matchstick-as` हमें उपयोगी परीक्षण विधियाँ प्रदान करता है और `test()` फ़ंक्शन को भी परिभाषित करता है जिसका उपयोग हम अपने परीक्षण ब्लॉक बनाने के लिए करेंगे। इसका बाकी हिस्सा बहुत सीधा है - यहाँ क्या होता है: +That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. 
The rest of it is pretty straightforward - here's what happens: - हम अपनी प्रारंभिक स्थिति सेट कर रहे हैं और एक कस्टम Gravatar इकाई जोड़ रहे हैं; -- हम दो `NewGravatar` ईवेंट ऑब्जेक्ट्स को उनके डेटा के साथ, `createNewGravatarEvent()` फ़ंक्शन का उपयोग करके परिभाषित करते हैं; -- हम उन घटनाओं के लिए हैंडलर विधियों को कॉल कर रहे हैं - `handleNewGravatars()` और हमारे कस्टम ईवेंट की सूची में पास कर रहे हैं; +- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; +- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; - हम स्टोर की स्थिति पर जोर देते हैं। वह कैसे काम करता है? - हम इकाई प्रकार और आईडी का एक अनूठा संयोजन पारित कर रहे हैं। फिर हम उस इकाई पर एक विशिष्ट क्षेत्र की जाँच करते हैं और दावा करते हैं कि इसका वह मूल्य है जिसकी हम अपेक्षा करते हैं। हम यह दोनों प्रारंभिक Gravatar एंटिटी के लिए कर रहे हैं जिसे हमने स्टोर में जोड़ा है, साथ ही दो Gravatar एंटिटी जो हैंडलर फ़ंक्शन को कॉल करने पर जुड़ जाती हैं; -- और अंत में - हम `clearStore()` का उपयोग करके स्टोर की सफाई कर रहे हैं ताकि हमारा अगला परीक्षण एक नए और खाली स्टोर ऑब्जेक्ट से शुरू हो सके। हम जितने चाहें उतने टेस्ट ब्लॉक परिभाषित कर सकते हैं। +- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. ये रहा - हमने अपना पहला परीक्षण बना लिया है! 👏 अब हमारे परीक्षण चलाने के लिए आपको बस अपने सबग्राफ रूट फ़ोल्डर में निम्नलिखित को चलाने की आवश्यकता है: -`ग्राफ परीक्षण गुरुत्वाकर्षण` +`graph test Gravity` और अगर सब ठीक हो जाता है तो आपको निम्नलिखित के साथ बधाई दी जानी चाहिए: -![माचिस की तीली कह रही है "सभी परीक्षण पास हो गए!"](/img/matchstick-tests-passed.png) +![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) ## सामान्य परीक्षण परिदृश्य @@ -754,11 +756,11 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri ### आईपीएफएस फाइलों का मज़ाक उड़ाना (मैचस्टिक 0.4.1 से) -उपयोगकर्ता `mockIpfsFile(hash, filePath)` फ़ंक्शन का उपयोग करके IPFS फ़ाइलों का मज़ाक उड़ा सकते हैं। फ़ंक्शन दो तर्कों को स्वीकार करता है, पहला IPFS फ़ाइल हैश/पथ है और दूसरा एक स्थानीय फ़ाइल का पथ है। +Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. 
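
As a quick illustration of the `mockIpfsFile(hash, filePath)` signature described above, here is a minimal sketch in which the hash and the fixture path are placeholder values rather than part of the original example:

```typescript
// Minimal sketch: point a mocked IPFS hash at a local JSON fixture
// before exercising a handler that reads from IPFS.
import { mockIpfsFile } from 'matchstick-as/assembly/index'

// 'QmExampleHash' and 'tests/ipfs/gravatar.json' are hypothetical placeholders
mockIpfsFile('QmExampleHash', 'tests/ipfs/gravatar.json')
```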

-नोट: `ipfs.map/ipfs.mapJSON` का परीक्षण करते समय, कॉलबैक फ़ंक्शन को परीक्षण फ़ाइल से निर्यात किया जाना चाहिए, ताकि matchstck इसका पता लगा सके, जैसे `processGravatar()` फ़ंक्शन नीचे परीक्षण उदाहरण में:
+NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstick to detect it, like the `processGravatar()` function in the test example below:

-`.test.ts` फ़ाइल:
+`.test.ts` file:

```typescript
import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index'
@@ -795,7 +797,7 @@ test('ipfs.map', () => {
})
```

-`utils.ts` फ़ाइल:
+`utils.ts` file:

```typescript
import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts"
@@ -857,11 +859,11 @@ gravatar.save()
assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0')
```

-Assert.fieldEquals() फ़ंक्शन चलाने से दिए गए अपेक्षित मान के विरुद्ध दिए गए फ़ील्ड की समानता की जाँच होगी। यदि मान **नहीं** बराबर हैं तो परीक्षण विफल हो जाएगा और एक त्रुटि संदेश आउटपुट होगा। अन्यथा परीक्षा सफलतापूर्वक उत्तीर्ण होगी।
+Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be output if the values are **NOT** equal. Otherwise the test will pass successfully.

### इवेंट मेटाडेटा के साथ इंटरैक्ट करना

-उपयोगकर्ता डिफ़ॉल्ट लेन-देन मेटाडेटा का उपयोग कर सकते हैं, जिसे `newMockEvent()` फ़ंक्शन का उपयोग करके एथेरियम.इवेंट के रूप में लौटाया जा सकता है। निम्न उदाहरण दिखाता है कि आप ईवेंट ऑब्जेक्ट पर उन फ़ील्ड्स को कैसे पढ़/लिख सकते हैं:
+Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object:

```typescript
// Read
@@ -878,7 +880,7 @@ newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS)
assert.equals(ethereum.Value.fromString("hello"), ethereum.Value.fromString("hello"));
```

-### यह दावा करना कि एक इकाई स्टोर में **नहीं** है
+### Asserting that an Entity is **not** in the store

उपयोगकर्ता यह दावा कर सकते हैं कि स्टोर में कोई इकाई मौजूद नहीं है। फ़ंक्शन एक इकाई प्रकार और एक आईडी लेता है। यदि इकाई वास्तव में स्टोर में है, तो प्रासंगिक त्रुटि संदेश के साथ परीक्षण विफल हो जाएगा। इस कार्यक्षमता का उपयोग कैसे करें इसका एक त्वरित उदाहरण यहां दिया गया है:

@@ -1160,7 +1162,7 @@ test('ethereum/contract dataSource creation example', () => {

#### Testing `file/ipfs` templates

Similarly to contract dynamic data sources, users can test file data sources and their handlers

##### Example `subgraph.yaml`

@@ -1293,25 +1295,25 @@ Using **Matchstick**, subgraph developers are able to run a script that will cal

The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as a whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead, we rely on the assertion that if a given handler has been called, the event/function for it has been properly mocked. 
-### आवश्यक शर्तें +### Prerequisites -**मैचस्टिक** में प्रदान की गई परीक्षण कवरेज कार्यात्मकता को चलाने के लिए, कुछ चीजें हैं जिन्हें आपको पहले से तैयार करने की आवश्यकता है: +To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: #### अपने हैंडलर निर्यात करें -**मैचस्टिक** के लिए यह जांचने के लिए कि कौन से हैंडलर चलाए जा रहे हैं, उन हैंडलर को **परीक्षण फ़ाइल** से निर्यात करने की आवश्यकता है। तो उदाहरण के लिए हमारे उदाहरण में, हमारे गुरुत्वाकर्षण.test.ts फ़ाइल में हमारे पास निम्नलिखित हैंडलर आयात किए जा रहे हैं: +In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -उस फ़ंक्शन को दिखाई देने के लिए (इसे `वाट` फ़ाइल **नाम से** में शामिल करने के लिए) हमें इसे निर्यात करने की भी आवश्यकता है, जैसे यह: +In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: ```typescript export { handleNewGravatar } ``` -### प्रयोग +### Usage एक बार यह सब सेट हो जाने के बाद, परीक्षण कवरेज टूल चलाने के लिए, बस चलाएँ: @@ -1319,7 +1321,7 @@ export { handleNewGravatar } graph test -- -c ``` -आप अपनी `package.json` फ़ाइल में कस्टम `कवरेज` कमांड भी जोड़ सकते हैं, जैसे: +You could also add a custom `coverage` command to your `package.json` file, like so: ```typescript "scripts": { @@ -1371,13 +1373,13 @@ Global test coverage: 22.2% (2/9 handlers). लॉग आउटपुट में टेस्ट रन अवधि शामिल है। यहाँ एक उदाहरण है: -`[गुरु, 31 मार्च 2022 13:54:54 +0300] कार्यक्रम में निष्पादित: 42.270ms।` +`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` ## सामान्य संकलक त्रुटियाँ > गंभीर: संदर्भ के साथ मान्य मॉड्यूल से WasmInstance नहीं बना सका: अज्ञात आयात: wasi_snapshot_preview1::fd_write परिभाषित नहीं किया गया है -इसका मतलब है कि आपने अपने कोड में ``console.log`\` का उपयोग किया है, जिसे AssemblyScript द्वारा सपोर्ट नहीं किया जाता है। कृपया [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api) का उपयोग करने पर विचार करें। +This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/subgraphs/developing/creating/graph-ts/api/#logging-api) > त्रुटि TS2554: अपेक्षित? तर्क, लेकिन मिला ?. > @@ -1391,9 +1393,9 @@ Global test coverage: 22.2% (2/9 handlers). > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -तर्कों में बेमेल `ग्राफ़-टीएस` और `मैचस्टिक-एज़` में बेमेल होने के कारण होता है। इस तरह की समस्याओं को ठीक करने का सबसे अच्छा तरीका है कि सभी चीज़ों को नवीनतम रिलीज़ किए गए संस्करण में अपडेट कर दिया जाए. +The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. -## अतिरिक्त संसाधन +## Additional Resources For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme_). 
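
To make the `console.log` fix described above concrete, here is a minimal, hedged sketch of a handler that logs through the graph-ts Logging API instead. The import path for the generated `NewGravatar` class and the `id` parameter are assumptions based on the demo subgraph, not part of the original text:

```typescript
// Sketch: AssemblyScript cannot compile console.log, so log through graph-ts.
import { log } from '@graphprotocol/graph-ts'
import { NewGravatar } from '../generated/Gravity/Gravity' // assumed codegen path

export function handleNewGravatar(event: NewGravatar): void {
  // log.info takes a template string plus an array of string arguments
  log.info('Handling NewGravatar with id {}', [event.params.id.toString()])
}
```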
From c0997f23bfb56c933da1c84ff31cb951343c90c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:22 -0500 Subject: [PATCH 0216/1534] New translations transferring-a-subgraph.mdx (Romanian) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ro/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/ro/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ro/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From f7112f71be285dead9e76a222d282aef5cee797d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:23 -0500 Subject: [PATCH 0217/1534] New translations transferring-a-subgraph.mdx (French) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/fr/subgraphs/developing/managing/transferring-a-subgraph.mdx index 4f12b5a94032..fe386614b198 100644 --- a/website/src/pages/fr/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/fr/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transférer un Subgraph +title: Transfer d'un Subgraph --- Les subgraphs publiés sur le réseau décentralisé possèdent un NFT minté à l'adresse qui a publié le subgraph. Le NFT est basé sur la norme ERC721, ce qui facilite les transferts entre comptes sur The Graph Network. @@ -35,7 +35,7 @@ Pour transférer la propriété d'un subgraph, procédez comme suit : 2. 
Choisissez l'adresse vers laquelle vous souhaitez transférer le subgraph : - ![Transfert de propriété de subgraph](/img/subgraph-ownership-transfer-2.png) + ![Transfert de propriété d'un subgraph](/img/subgraph-ownership-transfer-2.png) Optionnellement, vous pouvez également utiliser l'interface utilisateur intégrée dans les marketplaces NFT comme OpenSea : From 162badd684c5f16114e6838452396754ff4a9837 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:24 -0500 Subject: [PATCH 0218/1534] New translations transferring-a-subgraph.mdx (Spanish) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/es/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/es/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/es/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 26902f5843f064c10312fc35482591870c512822 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:25 -0500 Subject: [PATCH 0219/1534] New translations transferring-a-subgraph.mdx (Arabic) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ar/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/ar/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ar/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. 
Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From b4485c6ff08edffc7d3cafa90ca41aad73a7ea30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:26 -0500 Subject: [PATCH 0220/1534] New translations transferring-a-subgraph.mdx (Czech) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/cs/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/cs/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/cs/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From d3b81f1b5dd2bac365762cdc16fb68d58f5d1662 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:27 -0500 Subject: [PATCH 0221/1534] New translations transferring-a-subgraph.mdx (German) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/de/subgraphs/developing/managing/transferring-a-subgraph.mdx index ed29ea904e5b..d6837fbade98 100644 --- a/website/src/pages/de/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/de/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Einen Subgraph übertragen +title: Transferring a Subgraph --- Subgraphs, die im dezentralen Netzwerk veröffentlicht werden, haben eine NFT, die auf die Adresse geprägt wird, die den Subgraph veröffentlicht hat. Die NFT basiert auf dem Standard ERC721, der Überweisungen zwischen Konten im The Graph Network erleichtert. @@ -35,7 +35,7 @@ Um das Eigentum an einem Subgraph zu übertragen, gehen Sie wie folgt vor: 2. 
Wählen Sie die Adresse, an die Sie den Subgraph übertragen möchten: - ![Subgraph-Besitzübertragung](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optional können Sie auch die integrierte Benutzeroberfläche von NFT-Marktplätzen wie OpenSea verwenden: From a89c8e1ff2ff52764ac182f51035f339276b3149 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:28 -0500 Subject: [PATCH 0222/1534] New translations transferring-a-subgraph.mdx (Italian) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/it/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/it/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/it/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 43d1aec92859a0b9ee972853709652c2e0d18f7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:29 -0500 Subject: [PATCH 0223/1534] New translations transferring-a-subgraph.mdx (Japanese) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ja/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/ja/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ja/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. 
Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 3e9220b2fc9ac0151a7f08ac99f091dca3abe8a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:30 -0500 Subject: [PATCH 0224/1534] New translations transferring-a-subgraph.mdx (Korean) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ko/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/ko/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ko/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From e2066d4c1503252888a31595e2685e74b9ca8d05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:31 -0500 Subject: [PATCH 0225/1534] New translations transferring-a-subgraph.mdx (Dutch) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/nl/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/nl/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/nl/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. 
Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 9fafd3dcac068f489202b3e76cbdc641286c1169 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:32 -0500 Subject: [PATCH 0226/1534] New translations transferring-a-subgraph.mdx (Polish) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/pl/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/pl/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/pl/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 52a40a47ba07318d9162646fb524ae6cad4b5a81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:33 -0500 Subject: [PATCH 0227/1534] New translations transferring-a-subgraph.mdx (Portuguese) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/pt/subgraphs/developing/managing/transferring-a-subgraph.mdx index 5ba353e28f7f..1931370a6df7 100644 --- a/website/src/pages/pt/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/pt/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs publicados na rede descentralizada terão um NFT mintado no endereço que publicou o subgraph. O NFT é baseado no padrão ERC-721, que facilita transferências entre contas na Graph Network. @@ -35,7 +35,7 @@ Para transferir a titularidade de um subgraph, faça o seguinte: 2. 
Escolha o endereço para o qual gostaria de transferir o subgraph: - ![Transferência de Titularidade de Subgraph](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Também é possível usar a interface embutida de mercados de NFT, como o OpenSea: From c66f8326dd29ed9c608640ecad6898e8b9f6ad0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:34 -0500 Subject: [PATCH 0228/1534] New translations transferring-a-subgraph.mdx (Russian) --- .../developing/managing/transferring-a-subgraph.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ru/subgraphs/developing/managing/transferring-a-subgraph.mdx index 7b5fd719e291..c57a0263068e 100644 --- a/website/src/pages/ru/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ru/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Перенос субграфа +title: Transferring a Subgraph --- Субграфы, опубликованные в децентрализованной сети, имеют NFT, сминченный по адресу, опубликовавшему субграф. NFT основан на стандарте ERC721, который облегчает переводы между аккаунтами в The Graph Network. @@ -31,11 +31,11 @@ https://rainbow.me/your-wallet-addres 1. Используйте встроенный в Subgraph Studio пользовательский интерфейс: - ![Передача права собственности на субграф](/img/subgraph-ownership-transfer-1.png) + ![Передача права собственности на субграф](/image/subgraph-ownership-transfer-1.png) 2. Выберите адрес, на который хотели бы передать субграф: - ![Передача права собственности на субграф](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) При желании Вы также можете использовать встроенный пользовательский интерфейс таких маркетплейсов NFT, как OpenSea: From 727c9088ec3303d1bcf486efe55c4c2672b3d701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:35 -0500 Subject: [PATCH 0229/1534] New translations transferring-a-subgraph.mdx (Swedish) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/sv/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/sv/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/sv/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. 
Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 856fad8f4ea03f088ba575e7a83f05cd0d497d6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:36 -0500 Subject: [PATCH 0230/1534] New translations transferring-a-subgraph.mdx (Turkish) --- .../developing/managing/transferring-a-subgraph.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/tr/subgraphs/developing/managing/transferring-a-subgraph.mdx index 78f649b0e669..3631cc8a2973 100644 --- a/website/src/pages/tr/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/tr/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,10 +1,10 @@ --- -title: Bir Subgraph’i Transfer Etme +title: Transferring a Subgraph --- Merkeziyetsiz ağda yayımlanan subgraph’ler, NFT olarak oluşturulup subgraph’i yayımlayan adrese gönderilir. Bu NFT, The Graph Ağı’ndaki hesaplar arasında transferi kolaylaştıran standart bir ERC721 sözleşmesini temel alır. -## Hatırlatmalar +## Reminders - NFT’ye sahip olan kişi, subgraph’in kontrolünü elinde tutar. - NFT’nin sahibi NFT’yi satmaya veya transfer etmeye karar verirse, artık bu subgraph’i ağ üzerinde düzenleyemez veya güncelleyemez. @@ -35,7 +35,7 @@ Bir subgraph’in sahipliğini transfer etmek için şu adımları izleyin: 2. Subgraph’i transfer etmek istediğiniz adresi seçin: - ![Subgraph Sahipliği Transferi](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Alternatif olarak, OpenSea gibi NFT pazar yerlerinin entegre kullanıcı arayüzünü de kullanabilirsiniz: From 896148c65b1ed24baa8d5824259129e005aef290 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:37 -0500 Subject: [PATCH 0231/1534] New translations transferring-a-subgraph.mdx (Ukrainian) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/uk/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/uk/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/uk/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. 
Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 0d96edbd1bcdfdf3840f5a8794ac74601408f596 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:38 -0500 Subject: [PATCH 0232/1534] New translations transferring-a-subgraph.mdx (Chinese Simplified) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/zh/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/zh/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/zh/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 89612778c11ff0592e952f8ef8a276173e3053f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:39 -0500 Subject: [PATCH 0233/1534] New translations transferring-a-subgraph.mdx (Urdu (Pakistan)) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/ur/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/ur/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/ur/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. 
Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 750b506c4655510183e7da3593a95036dc366bd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:40 -0500 Subject: [PATCH 0234/1534] New translations transferring-a-subgraph.mdx (Vietnamese) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/vi/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/vi/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/vi/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From d9592efc2b8e27830e15584784b20a35496a3c6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:41 -0500 Subject: [PATCH 0235/1534] New translations transferring-a-subgraph.mdx (Marathi) --- .../subgraphs/developing/managing/transferring-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/mr/subgraphs/developing/managing/transferring-a-subgraph.mdx index 19999c39b1e3..0fc6632cbc40 100644 --- a/website/src/pages/mr/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/mr/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Transfer a Subgraph +title: Transferring a Subgraph --- Subgraphs published to the decentralized network have an NFT minted to the address that published the subgraph. The NFT is based on a standard ERC721, which facilitates transfers between accounts on The Graph Network. @@ -35,7 +35,7 @@ To transfer ownership of a subgraph, do the following: 2. 
Choose the address that you would like to transfer the subgraph to: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) Optionally, you can also use the built-in UI of NFT marketplaces like OpenSea: From 44851d7b07bedcb59ac3cc7980040ab9419bc17e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:42 -0500 Subject: [PATCH 0236/1534] New translations transferring-a-subgraph.mdx (Hindi) --- .../developing/managing/transferring-a-subgraph.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/managing/transferring-a-subgraph.mdx b/website/src/pages/hi/subgraphs/developing/managing/transferring-a-subgraph.mdx index fa8e363f09df..3720eff056a0 100644 --- a/website/src/pages/hi/subgraphs/developing/managing/transferring-a-subgraph.mdx +++ b/website/src/pages/hi/subgraphs/developing/managing/transferring-a-subgraph.mdx @@ -1,10 +1,10 @@ --- -title: सबग्राफ ट्रांसफर करें +title: Transferring a Subgraph --- विभिन्न नेटवर्क पर प्रकाशित subgraphs के लिए उस पते पर एक NFT जारी किया गया है जिसने subgraph प्रकाशित किया। NFT एक मानक ERC721 पर आधारित है, जो The Graph नेटवर्क पर खातों के बीच स्थानांतरण की सुविधा देता है। -## अनुस्मारक +## अनुस्मारक - जो भी 'NFT' का मालिक है, वह subgraph को नियंत्रित करता है। - यदि मालिक 'NFT' को बेचने या स्थानांतरित करने का निर्णय लेता है, तो वे नेटवर्क पर उस subgraph को संपादित या अपडेट नहीं कर पाएंगे। @@ -35,7 +35,7 @@ https://rainbow.me/your-wallet-addres 2. उस पते का चयन करें जिसे आप 'subgraph' को स्थानांतरित करना चाहेंगे: - ![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) + ![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-2.png) आप वैकल्पिक रूप से NFT बाजारों जैसे OpenSea के अंतर्निहित UI का भी उपयोग कर सकते हैं: From a9732a618e0b5d84809b7238d7eedeb5fe2d1336 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:43 -0500 Subject: [PATCH 0237/1534] New translations graphql-api.mdx (Romanian) --- .../pages/ro/subgraphs/querying/graphql-api.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ro/subgraphs/querying/graphql-api.mdx b/website/src/pages/ro/subgraphs/querying/graphql-api.mdx index f9176794ae2b..d93f73706ec6 100644 --- a/website/src/pages/ro/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ro/subgraphs/querying/graphql-api.mdx @@ -29,7 +29,7 @@ Query for a single `Token` entity defined in your schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. Query all `Token` entities: @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) 
| +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). From 743004541c139be6e7d68bd9796815100cfc54ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:44 -0500 Subject: [PATCH 0238/1534] New translations graphql-api.mdx (French) --- .../fr/subgraphs/querying/graphql-api.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/fr/subgraphs/querying/graphql-api.mdx b/website/src/pages/fr/subgraphs/querying/graphql-api.mdx index 19063cc6e428..025d9de10e7d 100644 --- a/website/src/pages/fr/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/fr/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Exemples -Requête pour une seule entité `Token` définie dans votre schéma : +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ Requête pour une seule entité `Token` définie dans votre schéma : } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -Interrogez toutes les entités `Token` : +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### Exemple de tri d'entités imbriquées -Depuis Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0), les entités peuvent être triées sur la base d'entités imbriquées. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -79,7 +79,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> Actuellement, vous pouvez trier par types `String` ou `ID` profonds à un niveau sur les champs `@entity` et `@derivedFrom`. 
Malheureusement, le [tri par interfaces sur des entités d'un seul niveau](https://github.com/graphprotocol/graph-node/pull/4058), le tri par champs qui sont des tableaux et des entités imbriquées est pas encore pris en charge. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### Pagination @@ -90,7 +90,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### Exemple utilisant `first` +#### Example using `first` Interroger les 10 premiers tokens : @@ -103,11 +103,11 @@ Interroger les 10 premiers tokens : } ``` -Pour rechercher des groupes d'entités au milieu d'une collection, le paramètre `skip` peut être utilisé conjointement avec le paramètre `first` pour ignorer un nombre spécifié d'entités en commençant par le début. de la collection. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### Exemple utilisant `first` et `skip` +#### Example using `first` and `skip` -Interrogez 10 entités `Token`, décalées de 10 places depuis le début de la collection : +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -118,7 +118,7 @@ Interrogez 10 entités `Token`, décalées de 10 places depuis le début de la } ``` -#### Exemple utilisant `first` et `id_ge` +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -138,9 +138,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### Exemple utilisant `where` +#### Example using `where` -Afficher les défis (Challenges) avec résultat `failed` (échec) : +Query challenges with `failed` outcome: ```graphql { @@ -154,7 +154,7 @@ Afficher les défis (Challenges) avec résultat `failed` (échec) : } ``` -Vous pouvez utiliser des suffixes comme `_gt`, `_lte` pour comparer les valeurs : +You can use suffixes like `_gt`, `_lte` for value comparison: #### Exemple de filtrage de plage @@ -186,7 +186,7 @@ Cela peut être utile si vous cherchez à récupérer uniquement les entités qu #### Exemple de filtrage d'entités imbriquées -Un filtrage sur la base d'entités imbriquées est possible dans les champs portant le suffixe `_`. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. Cela peut être utile si vous souhaitez récupérer uniquement les entités dont les entités au niveau enfant remplissent les conditions fournies. 
@@ -204,9 +204,9 @@ Cela peut être utile si vous souhaitez récupérer uniquement les entités dont #### Opérateurs logiques -Depuis Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0), vous pouvez regrouper plusieurs paramètres dans le même argument `where` en utilisant les opérateurs `et` ou `ou` pour filtrer les résultats en fonction de plusieurs critères. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### Opérateur `AND` +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. @@ -222,7 +222,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Sucre syntaxique :** Vous pouvez simplifier la requête ci-dessus en supprimant l'opérateur `et` en passant une sous-expression séparée par des virgules. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -236,7 +236,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### Opérateur `OR` +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -252,7 +252,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Remarque** : lors de la création de requêtes, il est important de prendre en compte l'impact sur les performances de l'utilisation de l'opérateur `ou`. Bien que `ou` puisse être un outil utile pour élargir les résultats de recherche, il peut également avoir des coûts importants. L'un des principaux problèmes avec `ou` est qu'il peut ralentir les requêtes. En effet, `ou` nécessite que la base de données parcoure plusieurs index, ce qui peut prendre du temps. Pour éviter ces problèmes, il est recommandé aux développeurs d'utiliser les opérateurs et à la place de ou chaque fois que cela est possible. Cela permet un filtrage plus précis et peut conduire à des requêtes plus rapides et plus précises. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### Tous les filtres @@ -281,9 +281,9 @@ _not_ends_with _not_ends_with_nocase ``` -> Veuillez noter que certains suffixes ne sont pris en charge que pour des types spécifiques. Par exemple, `Boolean` ne prend en charge que `_not`, `_in` et `_not_in`, mais `_` est disponible uniquement pour les types d’objet et d’interface. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. 
-En outre, les filtres globaux suivants sont disponibles dans le cadre de l'argument `where` : +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(numéro_gte : Int) @@ -291,7 +291,7 @@ _change_block(numéro_gte : Int) ### Interrogation des états précédents -Vous pouvez interroger l'état de vos entités non seulement pour le dernier bloc, qui est la valeur par défaut, mais également pour un bloc arbitraire du passé. Le bloc dans lequel une requête doit se produire peut être spécifié soit par son numéro de bloc, soit par son hachage de bloc en incluant un argument `block` dans les champs de niveau supérieur des requêtes. +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. @@ -311,7 +311,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Cette requête renverra les entités `Challenge` et les entités `Application` qui leur sont associées, telles qu'elles existaient directement après le traitement du bloc numéro 8 000 000. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### Exemple @@ -327,26 +327,26 @@ Cette requête renverra les entités `Challenge` et les entités `Application` q } ``` -Cette requête renverra les entités `Challenge` et leurs entités `Application` associées, telles qu'elles existaient directement après le traitement du bloc avec le hachage donné. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### Requêtes de recherche en texte intégral -Les champs de requête de recherche en texte intégral fournissent une API de recherche textuelle expressive qui peut être ajoutée au schéma du subgraph et personnalisée. Reportez-vous à [Définition des champs de recherche en texte intégral](/developing/creating-a-subgraph/#defining-fulltext-search-fields) pour ajouter la recherche en texte intégral à votre subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -Les champs de requête de recherche en texte `intégral`, fournissent une Api de recherche de texte expressive qui peut être ajoutée au schéma de soubgraph et personnalisée. Reportez-vous à `Définition des champs de recherche en texte intégral` pour ajouter une recherche en texte intégral à votre subgraph. +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. 
Opérateurs de recherche en texte intégral : -| Symbole | Opérateur | Description | -| --- | --- | --- | -| `&` | `And` | Pour combiner plusieurs termes de recherche dans un filtre pour les entités incluant tous les termes fournis | -| | | `Or` | Les requêtes comportant plusieurs termes de recherche séparés par l'opérateur ou renverront toutes les entités correspondant à l'un des termes fournis | -| `<>` | `Follow by` | Spécifiez la distance entre deux mots. | -| `:*` | `Prefix` | Utilisez le terme de recherche de préfixe pour trouver les mots dont le préfixe correspond (2 caractères requis.) | +| Symbole | Opérateur | Description | +| ------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | Pour combiner plusieurs termes de recherche dans un filtre pour les entités incluant tous les termes fournis | +| | | `Or` | Les requêtes comportant plusieurs termes de recherche séparés par l'opérateur ou renverront toutes les entités correspondant à l'un des termes fournis | +| `<->` | `Follow by` | Spécifiez la distance entre deux mots. | +| `:*` | `Prefix` | Utilisez le terme de recherche de préfixe pour trouver les mots dont le préfixe correspond (2 caractères requis.) | #### Exemples -En utilisant l'opérateur `ou`, cette requête filtrera les entités de blog ayant des variations de "anarchism" ou "crumpet" dans leurs champs de texte intégral. +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. ```graphql { @@ -359,7 +359,7 @@ En utilisant l'opérateur `ou`, cette requête filtrera les entités de blog aya } ``` -L'opérateur `follow by` spécifie un mot à une distance spécifique dans les documents en texte intégral. La requête suivante renverra tous les blogs contenant des variations de "décentraliser" suivies de "philosophie" +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -387,11 +387,11 @@ Combinez des opérateurs de texte intégral pour créer des filtres plus complex ### Validation -Graph Node met en œuvre une validation [basée sur les spécifications](https://spec.graphql.org/October2021/#sec-Validation) des requêtes GraphQL qu'il reçoit à l'aide de [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), qui est basée sur l'implémentation de référence [graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). Les requêtes qui échouent à une règle de validation sont accompagnées d'une erreur standard - consultez la [Spécification GraphQL](https://spec.graphql.org/October2021/#sec-Validation) pour en savoir plus. +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. ## Schema -The schema of your dataSources, i.e. 
the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -399,13 +399,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### Entities -Tous les types GraphQL avec des directives `@entity` dans votre schéma seront traités comme des entités et doivent avoir un champ `ID`. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **Note:** Actuellement, tous les types de votre schéma doivent avoir une directive `@entity`. À l'avenir, nous traiterons les types sans directive `@entity` comme des objets de valeur, mais cela n'est pas encore possible. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### Métadonnées du Subgraph -Tous les subgraphs ont un objet `_Meta_` auto-généré, qui permet d'accéder aux métadonnées du subgraph. Cet objet peut être interrogé comme suit : +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -423,12 +423,12 @@ Tous les subgraphs ont un objet `_Meta_` auto-généré, qui permet d'accéder a Si un bloc est fourni, les métadonnées sont celles de ce bloc, sinon le dernier bloc indexé est utilisé. S'il est fourni, le bloc doit être postérieur au bloc de départ du subgraph et inférieur ou égal au bloc indexé le plus récent. -`deployment` est un identifiant unique, correspondant au CID IPFS du fichier `subgraph.yaml`. +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
-`block` fournit des informations sur le dernier bloc (en tenant compte des contraintes de bloc transmises à `_meta`) : +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash : le hash du bloc - number: the block number - timestamp : l'horodatage du bloc, si disponible (ceci n'est actuellement disponible que pour les subgraphs indexant les réseaux EVM) -`hasIndexingErrors` est un booléen identifiant si le subgraph a rencontré des erreurs d'indexation au cours d'un bloc passé +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From 036c2bc8d6ff769769b8e2b3d36dc5a22a2515fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:45 -0500 Subject: [PATCH 0239/1534] New translations graphql-api.mdx (Spanish) --- .../es/subgraphs/querying/graphql-api.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/es/subgraphs/querying/graphql-api.mdx b/website/src/pages/es/subgraphs/querying/graphql-api.mdx index 1954f49601c6..edbcef1a076f 100644 --- a/website/src/pages/es/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/es/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Ejemplos -Consulta por un solo `Token` definido en tu esquema: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ Consulta por un solo `Token` definido en tu esquema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -Consulta todas las entidades `Token`: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### Ejemplo de filtrado de entidades anidadas -A partir de Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0), las entidades se pueden ordenar con base en entidades anidadas. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> Actualmente, puedes ordenar por tipos `String` o `ID` de un solo nivel en campos `@entity` y `@derivedFrom`. Desafortunadamente, [aún no se admite la ordenación por interfaces en entidades de un solo nivel](https://github.com/graphprotocol/graph-node/pull/4058), la ordenación por campos que son matrices y entidades anidadas. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### Paginación @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. 
To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### Ejemplo usando `first` +#### Example using `first` Consulta los primeros 10 tokens: @@ -101,11 +101,11 @@ Consulta los primeros 10 tokens: } ``` -Para consultar grupos de entidades en medio de una colección, el parámetro `skip` puede utilizarse junto con el parámetro `first` para omitir un número determinado de entidades empezando por el principio de la colección. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### Ejemplo usando `first` y `skip` +#### Example using `first` and `skip` -Consulta 10 entidades `Token`, desplazadas 10 lugares desde el principio de la colección: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ Consulta 10 entidades `Token`, desplazadas 10 lugares desde el principio de la c } ``` -#### Ejemplo usando `first` y `id_ge` +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### Ejemplo usando `where` +#### Example using `where` -Desafíos de consulta con resultado `failed`: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ Desafíos de consulta con resultado `failed`: } ``` -Puedes utilizar sufijos como `_gt`, `_lte` para la comparación de valores: +You can use suffixes like `_gt`, `_lte` for value comparison: #### Ejemplo de filtrado de rango @@ -184,7 +184,7 @@ Esto puede ser útil si buscas obtener solo las entidades que han cambiado, por #### Ejemplo de filtrado de entidades anidadas -El filtrado basado en entidades anidadas es posible en los campos con el sufijo `_`. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. Esto puede ser útil si estás buscando obtener solo entidades cuyas entidades de nivel inicial cumplan con las condiciones proporcionadas. @@ -202,9 +202,9 @@ Esto puede ser útil si estás buscando obtener solo entidades cuyas entidades d #### Operadores lógicos -A partir de Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) puedes agrupar múltiples parámetros en el mismo argumento `where` utilizando los operadores `and` o `or` para filtrar los resultados en base a más de un criterio. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### Operador `AND` +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. 
@@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Azúcar sintáctico**: Puedes simplificar la consulta anterior eliminando el operador `and` pasando una subexpresión separada por comas. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -234,7 +234,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### Operador `OR` +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Nota**: Al construir consultas, es importante considerar el impacto en el rendimiento al utilizar el operador `or`. Si bien `or` puede ser una herramienta útil para ampliar los resultados de búsqueda, también puede tener costos significativos. Uno de los principales problemas con `or` es que puede hacer que las consultas se vuelvan más lentas. Esto se debe a que `or` requiere que la base de datos escanee múltiples índices, lo que puede ser un proceso que consume tiempo. Para evitar estos problemas, se recomienda que los desarrolladores utilicen los operadores and en lugar de or siempre que sea posible. Esto permite un filtrado más preciso y puede llevar a consultas más rápidas y precisas. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### Todos los filtros @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> Ten en cuenta que algunos sufijos solo se admiten para tipos específicos. Por ejemplo, `Boolean` solo admite `_not`, `_in` y `_not_in`, pero `_` está disponible solo para tipos de objeto e interfaz. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -Además, los siguientes filtros globales están disponibles como parte del argumento `where`: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### Consultas sobre Time-travel -Puedes consultar el estado de tus entidades no solo para el último bloque, que es el predeterminado, sino también para un bloque arbitrario en el pasado. El bloque en el que debe ocurrir una consulta se puede especificar por su número de bloque o su hash de bloque al incluir un argumento `block` en los campos de nivel superior de las consultas. +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. 
The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. @@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Esta consulta devolverá entidades `Challenge` y sus entidades `Application` asociadas, tal como existían directamente después de procesar el bloque número 8,000,000. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### Ejemplo @@ -325,26 +325,26 @@ Esta consulta devolverá entidades `Challenge` y sus entidades `Application` aso } ``` -Esta consulta devolverá entidades `Challenge` y sus entidades `Application` asociadas, tal como existían directamente después de procesar el bloque con el hash dado. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### Consultas de Búsqueda de Texto Completo -Los campos de consulta de búsqueda de texto completo proporcionan una API de búsqueda de texto expresivo que se puede agregar al esquema del subgrafo y personalizar. Consulta [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) para agregar búsqueda de texto completo a tu subgrafo. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -Las consultas de búsqueda de texto completo tienen un campo obligatorio, `text`, para proporcionar términos de búsqueda. Hay varios operadores especiales de texto completo disponibles para usar en este campo de búsqueda `text`. +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Operadores de búsqueda de texto completo: -| Símbolo | Operador | Descripción | -| --- | --- | --- | -| `&` | `And` | Para combinar varios términos de búsqueda en un filtro para entidades que incluyen todos los términos proporcionados | -| | | `O` | Las consultas con varios términos de búsqueda separados por o el operador devolverá todas las entidades que coincidan con cualquiera de los términos proporcionados | -| `<->` | `Follow by` | Especifica la distancia entre dos palabras. | -| `:*` | `Prefijo` | Utilice el término de búsqueda del prefijo para encontrar palabras cuyo prefijo coincida (se requieren 2 caracteres.) 
| +| Símbolo | Operador | Descripción | +| ------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Para combinar varios términos de búsqueda en un filtro para entidades que incluyen todos los términos proporcionados | +| | | `Or` | Las consultas con varios términos de búsqueda separados por o el operador devolverá todas las entidades que coincidan con cualquiera de los términos proporcionados | +| `<->` | `Follow by` | Especifica la distancia entre dos palabras. | +| `:*` | `Prefix` | Utilice el término de búsqueda del prefijo para encontrar palabras cuyo prefijo coincida (se requieren 2 caracteres.) | #### Ejemplos -Con el operador `or`, esta consulta filtrará las entidades de blog con variaciones de "anarchism" o "crumpet" en sus campos de texto completo. +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. ```graphql { @@ -357,7 +357,7 @@ Con el operador `or`, esta consulta filtrará las entidades de blog con variacio } ``` -El operador `follow by` especifica palabras separadas por una distancia específica en los documentos de texto completo. La siguiente consulta devolverá todos los blogs con variaciones de "decentralize" seguido por "philosophy" +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -385,11 +385,11 @@ Combina operadores de texto completo para crear filtros más complejos. Con un o ### Validación -Graph Node implementa una validación [basada en especificaciones](https://spec.graphql.org/October2021/#sec-Validation) de las consultas de GraphQL que recibe mediante [graphql-tools-rs](https:// github.com/dotansimha/graphql-tools-rs#validation-rules), que se basa en [implementación de referencia de graphql-js](https://github.com/graphql/graphql-js /tree/main/src/validation). Las consultas que fallan en una regla de validación lo hacen con un error estándar: visita las [especificaciones de GraphQL](https://spec.graphql.org/October2021/#sec-Validation) para obtener más información. +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. ## Esquema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. 
The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### Entidades -Todos los tipos de GraphQL con directivas `@entity` en tu esquema se tratarán como entidades y deben tener un campo `ID`. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **Nota:** Actualmente, todos los tipos en tu esquema deben tener una directiva `@entity`. En el futuro, trataremos los tipos sin una directiva `@entity` como objetos de valor, pero esto aún no se admite. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### Metadatos del subgrafo -Todos los subgrafos tienen un objeto `_Meta_` generado automáticamente, que brinda acceso a los metadatos del subgrafo. Esto se puede consultar de la siguiente manera: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ Todos los subgrafos tienen un objeto `_Meta_` generado automáticamente, que bri Si se proporciona un bloque, los metadatos corresponden a ese bloque; de lo contrario, se utiliza el bloque indexado más reciente. Si es proporcionado, el bloque debe ser posterior al bloque de inicio del subgrafo y menor o igual que el bloque indexado más reciente. -`deployment` es un ID único, correspondiente al IPFS CID del archivo `subgraph.yaml`. +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. -`block` proporciona información sobre el último bloque (teniendo en cuenta cualquier restricción de bloque pasada a `_meta`): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash: el hash del bloque - número: el número de bloque - timestamp: la marca de tiempo del bloque, en caso de estar disponible (actualmente solo está disponible para subgrafos que indexan redes EVM) -`hasIndexingErrors` es un valor booleano que identifica si el subgrafo encontró errores de indexación en algún bloque anterior +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From 8992bf1b585215813c24704c7672a3eaf1d48dd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:46 -0500 Subject: [PATCH 0240/1534] New translations graphql-api.mdx (Arabic) --- .../ar/subgraphs/querying/graphql-api.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/ar/subgraphs/querying/graphql-api.mdx b/website/src/pages/ar/subgraphs/querying/graphql-api.mdx index 9a2c1830062c..8a337a6092a5 100644 --- a/website/src/pages/ar/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ar/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Examples -الاستعلام عن كيان `Token` واحد معرف في مخططك: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,7 +29,7 @@ In your subgraph schema you define types called `Entities`. 
For each `Entity` ty } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. Query all `Token` entities: @@ -49,7 +49,7 @@ When querying a collection, you may: - Use the `orderBy` parameter to sort by a specific attribute. - Use the `orderDirection` to specify the sort direction, `asc` for ascending or `desc` for descending. -#### مثال +#### Example ```graphql { @@ -295,7 +295,7 @@ The result of such a query will not change over time, i.e., querying at a certai > Note: The current implementation is still subject to certain limitations that might violate these guarantees. The implementation can not always tell that a given block hash is not on the main chain at all, or if a query result by a block hash for a block that is not yet considered final could be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. -#### مثال +#### Example ```graphql { @@ -311,7 +311,7 @@ The result of such a query will not change over time, i.e., querying at a certai This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. -#### مثال +#### Example ```graphql { @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| رمز | عامل التشغيل | الوصف | -| --- | --- | --- | -| `&` | `And` | لدمج عبارات بحث متعددة في فلتر للكيانات التي تتضمن جميع العبارات المتوفرة | -| | | `أو` | الاستعلامات التي تحتوي على عبارات بحث متعددة مفصولة بواسطة عامل التشغيل or ستعيد جميع الكيانات المتطابقة من أي عبارة متوفرة | -| `<->` | `Follow by` | يحدد المسافة بين كلمتين. | -| `:*` | `Prefix` | يستخدم عبارة البحث prefix للعثور على الكلمات التي تتطابق بادئتها (مطلوب حرفان.) | +| رمز | عامل التشغيل | الوصف | +| ------ | ------------ | --------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | لدمج عبارات بحث متعددة في فلتر للكيانات التي تتضمن جميع العبارات المتوفرة | +| | | `Or` | الاستعلامات التي تحتوي على عبارات بحث متعددة مفصولة بواسطة عامل التشغيل or ستعيد جميع الكيانات المتطابقة من أي عبارة متوفرة | +| `<->` | `Follow by` | يحدد المسافة بين كلمتين. | +| `:*` | `Prefix` | يستخدم عبارة البحث prefix للعثور على الكلمات التي تتطابق بادئتها (مطلوب حرفان.) | #### Examples @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## المخطط -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. 
The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). From be67e36abee0a605f23a001008bd49127626f6df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:47 -0500 Subject: [PATCH 0241/1534] New translations graphql-api.mdx (Czech) --- .../cs/subgraphs/querying/graphql-api.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/cs/subgraphs/querying/graphql-api.mdx b/website/src/pages/cs/subgraphs/querying/graphql-api.mdx index db00f62e2c40..95a4175b7062 100644 --- a/website/src/pages/cs/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/cs/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Příklady -Dotaz na jednu entitu `Token` definovanou ve vašem schématu: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ Dotaz na jednu entitu `Token` definovanou ve vašem schématu: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -Dotaz na všechny entity `Token`: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### Příklad vnořeného třídění entit -Od verze Uzel grafu [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) lze entity třídit na základě vnořených entit. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> V současné době můžete řadit podle jednoúrovňových typů `String` nebo `ID` v polích `@entity` a `@derivedFrom`. Bohužel [třídění podle rozhraní na jednoúrovňových hlubokých entitách](https://github.com/graphprotocol/graph-node/pull/4058), třídění podle polí, která jsou poli, a vnořených entit zatím není podporováno. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### Stránkování @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### Příklad s použitím `first` +#### Example using `first` Dotaz na prvních 10 tokenů: @@ -101,11 +101,11 @@ Dotaz na prvních 10 tokenů: } ``` -Pro dotazování na skupiny entit uprostřed kolekce lze použít parametr `skip` ve spojení s parametrem `first` pro vynechání určitého počtu entit počínaje začátkem kolekce. 
+To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### Příklad s použitím `first` a `skip` +#### Example using `first` and `skip` -Dotaz na 10 entit `Token`, posunutých o 10 míst od začátku kolekce: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ Dotaz na 10 entit `Token`, posunutých o 10 míst od začátku kolekce: } ``` -#### Příklad s použitím `first` a `id_ge` +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### Příklad s použitím `where` +#### Example using `where` -Výzvy k dotazu s výsledkem `neúspěšný`: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ Výzvy k dotazu s výsledkem `neúspěšný`: } ``` -Pro porovnání hodnot můžete použít přípony jako `_gt`, `_lte`: +You can use suffixes like `_gt`, `_lte` for value comparison: #### Příklad filtrování rozsahu @@ -184,7 +184,7 @@ To může být užitečné, pokud chcete načíst pouze entity, které se změni #### Příklad vnořeného filtrování entit -Filtrování na základě vnořených entit je možné v polích s příponou `_`. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. To může být užitečné, pokud chcete načíst pouze entity, jejichž entity podřízené úrovně splňují zadané podmínky. @@ -202,9 +202,9 @@ To může být užitečné, pokud chcete načíst pouze entity, jejichž entity #### Logické operátory -Od verze Uzel grafu [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) můžete seskupit více parametrů v jednom argumentu `where` pomocí operátorů `and` nebo `or` a filtrovat výsledky na základě více kritérií. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### Operátor `AND` +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. @@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Syntaktický cukr:** Výše uvedený dotaz můžete zjednodušit odstraněním operátoru `a` předáním podvýrazu odděleného čárkami. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -234,7 +234,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### Operátor `OR` +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Poznámka**: Při sestavování dotazů je důležité zvážit dopad použití operátoru `nebo` na výkon. 
Operátor `nebo` sice může být užitečným nástrojem pro rozšíření výsledků vyhledávání, ale může s sebou nést i značné náklady. Jedním z hlavních problémů operátoru `nebo` je, že může způsobit zpomalení dotazů. Je to proto, že `nebo` vyžaduje, aby databáze prohledala více indexů, což může být časově náročný proces. Abyste se těmto problémům vyhnuli, doporučujeme vývojářům používat operátory and místo or, kdykoli je to možné. To umožňuje přesnější filtrování a může vést k rychlejším a přesnějším dotazům. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### Všechny filtry @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> Upozorňujeme, že některé přípony jsou podporovány pouze pro určité typy. Například `Boolean` podporuje pouze `_not`, `_in` a `_not_in`, ale `_` je k dispozici pouze pro typy objektů a rozhraní. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -Kromě toho jsou jako součást argumentu `where` k dispozici následující globální filtry: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### Dotazy na cestování čase -Můžete se dotazovat na stav entit nejen pro nejnovější blok, což je výchozí nastavení, ale také pro libovolný blok v minulosti. Blok, u kterého má dotaz proběhnout, lze zadat buď číslem bloku, nebo jeho blokovým hashem, a to tak, že do polí toplevel dotazů zahrnete argument `blok`. +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. @@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Tento dotaz vrátí entity `Challenge` a k nim přiřazené entity `Application` tak, jak existovaly bezprostředně po zpracování bloku číslo 8,000,000. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. 
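As a quick illustration of that determinism (a sketch reusing the `Challenge` fields from the examples above, not an additional documented endpoint), the same collection can be read at the latest block and at a pinned past block in a single request via GraphQL aliases:

```graphql
{
  # Default: served from the latest indexed block, so this part may change
  latest: challenges(first: 1) {
    id
    outcome
  }
  # Pinned to a past block: once that block is final, this part never changes
  pinned: challenges(first: 1, block: { number: 8000000 }) {
    id
    outcome
  }
}
```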
#### Příklad @@ -325,26 +325,26 @@ Tento dotaz vrátí entity `Challenge` a k nim přiřazené entity `Application` } ``` -Tento dotaz vrátí entity `Challenge` a s nimi spojené entity `Application` tak, jak existovaly bezprostředně po zpracování bloku s daným hashem. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### Fulltextové Vyhledávání dotazy -Pole pro fulltextové vyhledávání poskytují expresivní rozhraní API pro textové vyhledávání, které lze přidat do schématu podgrafů a přizpůsobit je. Viz [Definice polí pro fulltextové vyhledávání](/developing/creating-a-subgraph/#defining-fulltext-search-fields) pro přidání fulltextového vyhledávání do podgrafu. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -Fulltextové vyhledávací dotazy mají jedno povinné pole `text` pro zadání hledaných výrazů. V tomto vyhledávacím poli `text` je k dispozici několik speciálních fulltextových operátorů. +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Operátory fulltextového vyhledávání: -| Symbol | Operátor | Popis | -| --- | --- | --- | -| `&` | `a` | Pro kombinaci více vyhledávacích výrazů do filtru pro entity, které obsahují všechny zadané výrazy | -| | | `Nebo` | Dotazy s více hledanými výrazy oddělenými operátorem nebo vrátí všechny entity, které odpovídají některému z uvedených výrazů | -| `<->` | `Follow by` | Zadejte vzdálenost mezi dvěma slovy. | -| `:*` | `Prefix` | Pomocí předponového výrazu vyhledejte slova, jejichž předpona se shoduje (vyžadovány 2 znaky) | +| Symbol | Operátor | Popis | +| ------ | ----------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Pro kombinaci více vyhledávacích výrazů do filtru pro entity, které obsahují všechny zadané výrazy | +| | | `Or` | Dotazy s více hledanými výrazy oddělenými operátorem nebo vrátí všechny entity, které odpovídají některému z uvedených výrazů | +| `<->` | `Follow by` | Zadejte vzdálenost mezi dvěma slovy. | +| `:*` | `Prefix` | Pomocí předponového výrazu vyhledejte slova, jejichž předpona se shoduje (vyžadovány 2 znaky) | #### Příklady -Pomocí operátoru `nebo` tento dotaz vyfiltruje entity blogu s variantami slov "anarchism" nebo "crumpet" v jejich fulltextových polích. +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. ```graphql { @@ -357,7 +357,7 @@ Pomocí operátoru `nebo` tento dotaz vyfiltruje entity blogu s variantami slov } ``` -Operátor `follow by` určuje slova ve fulltextových dokumentech v určité vzdálenosti od sebe. Následující dotaz vrátí všechny blogy s variantami slova "decentralize" následované slovem "philosophy" +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -385,11 +385,11 @@ Kombinací fulltextových operátorů můžete vytvářet složitější filtry. 
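For instance (a minimal sketch reusing the `blogSearch` field from the examples above), the `&` and `:*` operators can be combined in one `text` expression:

```graphql
{
  # Matches blogs that contain a variation of "anarchism" together with
  # any word starting with "crump" (the prefix operator needs 2+ characters)
  blogSearch(text: "anarchism & crump:*") {
    id
    title
    body
    author
  }
}
```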
### Validace -Uzel grafu implementuje ověření [založené na specifikacích](https://spec.graphql.org/October2021/#sec-Validation) dotazů GraphQL, které obdrží, pomocí [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), které je založeno na [referenční implementaci graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). Dotazy, které nesplňují ověřovací pravidlo, tak činí se standardní chybou – další informace naleznete na stránce [GraphQL](https://spec.graphql.org/October2021/#sec-Validation). +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### Entities -Všechny typy GraphQL s direktivami `@entity` ve vašem schématu budou považovány za entity a musí mít pole `ID`. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **Poznámka:** V současné době musí mít všechny typy ve vašem schématu direktivu `@entity`. V budoucnu budeme typy bez direktivy `@entity` považovat za hodnotové objekty, ale to zatím není podporováno. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### Metadata podgrafů -Všechny podgrafy mají automaticky generovaný objekt `_Meta_`, který poskytuje přístup k metadatům podgrafu. Na tento objekt se lze dotazovat následujícím způsobem: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ Všechny podgrafy mají automaticky generovaný objekt `_Meta_`, který poskytuj Pokud je uveden blok, metadata se vztahují k tomuto bloku, pokud ne, použije se poslední indexovaný blok. Pokud je blok uveden, musí se nacházet za počátečním blokem podgrafu a musí být menší nebo roven poslednímu Indevovaný bloku. -` deployment` je jedinečné ID, které odpovídá IPFS CID souboru `subgraph.yaml`. +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
-`block` poskytuje informace o posledním bloku (s přihlédnutím k případným omezením bloku předaným do `_meta`): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash: hash bloku - číslo: číslo bloku - timestamp: časové razítko bloku, pokud je k dispozici (v současné době je k dispozici pouze pro podgrafy indexující sítě EVM) -`hasIndexingErrors` je boolean určující, zda se v podgrafu vyskytly chyby indexování v některém z minulých bloků +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From e544dcd923727a580c8cf5dbbd34b789c7a0af9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:48 -0500 Subject: [PATCH 0242/1534] New translations graphql-api.mdx (German) --- .../de/subgraphs/querying/graphql-api.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/src/pages/de/subgraphs/querying/graphql-api.mdx b/website/src/pages/de/subgraphs/querying/graphql-api.mdx index 725f90ae9e2c..08ca4a0a483f 100644 --- a/website/src/pages/de/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/de/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Beispiele -Die Abfrage für eine einzelne `Token`-Entität, die in Ihrem Schema definiert ist: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ Die Abfrage für eine einzelne `Token`-Entität, die in Ihrem Schema definiert i } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -Die Abfrage für alle `Token`-Entitäten: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### Beispiel für die Sortierung verschachtelter Entitäten -Ab Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) können Entitäten auf der Basis von verschachtelten Entitäten sortiert werden. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> Derzeit können Sie in den Feldern `@entity` und `@derivedFrom` nach einstufig tiefen `String`- oder `ID`-Typen sortieren. Leider ist das [Sortieren nach Schnittstellen auf Entitäten mit einer Tiefe von einer Ebene](https://github.com/graphprotocol/graph-node/pull/4058), das Sortieren nach Feldern, die Arrays und verschachtelte Entitäten sind, noch nicht unterstützt. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### Pagination @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. 
To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### Ein Beispiel für die Verwendung von `first` +#### Example using `first` Die Abfrage für die ersten 10 Token: @@ -101,7 +101,7 @@ Die Abfrage für die ersten 10 Token: } ``` -Um Gruppen von Entitäten in der Mitte einer Sammlung abzufragen, kann der Parameter `skip` in Verbindung mit dem Parameter `first` verwendet werden, um eine bestimmte Anzahl von Entitäten beginnend am Anfang der Sammlung zu überspringen. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. #### Example using `first` and `skip` @@ -204,7 +204,7 @@ This can be useful if you are looking to fetch only entities whose child-level e As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### `AND`-Operator +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. @@ -295,7 +295,7 @@ The result of such a query will not change over time, i.e., querying at a certai > Note: The current implementation is still subject to certain limitations that might violate these guarantees. The implementation can not always tell that a given block hash is not on the main chain at all, or if a query result by a block hash for a block that is not yet considered final could be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. -#### Example +#### Beispiel ```graphql { @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Symbol | Operator | Beschreibung | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Beschreibung | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Beispiele @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Schema -The schema of your dataSources, i.e. 
the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). From 217f9a21601d91bcae2270630ded2c9db9a0b267 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:49 -0500 Subject: [PATCH 0243/1534] New translations graphql-api.mdx (Italian) --- .../it/subgraphs/querying/graphql-api.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/it/subgraphs/querying/graphql-api.mdx b/website/src/pages/it/subgraphs/querying/graphql-api.mdx index 4bcf9a79d1cd..50dd1a4143ef 100644 --- a/website/src/pages/it/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/it/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Esempi -Eseguire query di una singola entità `Token` definita nello schema: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ Eseguire query di una singola entità `Token` definita nello schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -Eseguire query di tutte le entità `Token`: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### Esempio di ordinamento di entità annidate -A partire da Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) le entità possono essere ordinate sulla base delle entità annidate. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> Attualmente, è possibile ordinare per tipi di `String` oppure `ID` profondi un livello sui campi `@entity` e `@derivedFrom`. Purtroppo non è ancora supportato [l'ordinamento per interfacce su entità profonde un livello](https://github.com/graphprotocol/graph-node/pull/4058), l'ordinamento per campi che sono array e entità annidate. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### Impaginazione @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. 
- Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### Esempio di utilizzo di `first` +#### Example using `first` Eseguire query di primi 10 token: @@ -101,11 +101,11 @@ Eseguire query di primi 10 token: } ``` -Per eseguire query di gruppi di entità nel mezzo di una collezione, il parametro `skip` può essere usato insieme al parametro `first` per saltare un numero specifico di entità a partire dall'inizio della collezione. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### Esempio di utilizzo di `first` e `skip` +#### Example using `first` and `skip` -Eseguire query di 10 entità `Token`, sfalsate di 10 posizioni rispetto all'inizio della collezione: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ Eseguire query di 10 entità `Token`, sfalsate di 10 posizioni rispetto all'iniz } ``` -#### Esempio di utilizzo di `first` e `id_ge` +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### Esempio di utilizzo di `where` +#### Example using `where` -Query con esito `failed`: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ Query con esito `failed`: } ``` -È possibile utilizzare suffissi come `_gt`, `_lte` per confrontare i valori: +You can use suffixes like `_gt`, `_lte` for value comparison: #### Esempio di filtraggio dell'intervallo @@ -184,7 +184,7 @@ Questo può essere utile se si vuole recuperare solo le entità che sono cambiat #### Esempio di filtraggio di entità annidate -Il filtraggio sulla base di entità annidate è possibile nei campi con il suffisso `_`. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. Questo può essere utile se si vuole recuperare solo le entità il cui livello di figlio soddisfa le condizioni fornite. @@ -202,9 +202,9 @@ Questo può essere utile se si vuole recuperare solo le entità il cui livello d #### Operatori logici -A partire dalla versione Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) è possibile raggruppare più parametri nello stesso argomento `where` usando gli operatori `and` oppure `or` per filtrare i risultati in base a più di un criterio. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### Operatore `AND` +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. 
@@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Syntactic sugar:** Si può semplificare la query precedente eliminando l'operatore `and` passando una sottoespressione separata da virgole. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -234,7 +234,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### Operatore `OR` +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Nota**: Quando si costruiscono le query, è importante considerare l'impatto sulle prestazioni dell'uso dell'operatore `or`. Sebbene `or` possa essere uno strumento utile per ampliare i risultati della ricerca, può anche avere costi significativi. Uno dei problemi principali di `or` è che può causare un rallentamento delle query. Questo perché `or` richiede al database di eseguire la scansione di più indici, un processo che può richiedere molto tempo. Per evitare questi problemi, si consiglia agli sviluppatori di utilizzare gli operatori e al posto di o quando possibile. Ciò consente di effettuare filtri più precisi e di ottenere query più rapide e precise. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### Tutti i filtri @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> Si noti che alcuni suffissi sono supportati solo per tipi specifici. Ad esempio, `Boolean` supporta solo `_not`, `_in` e `_not_in`, mentre `_` è disponibile solo per i tipi oggetto e interfaccia. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -Inoltre, i seguenti filtri globali sono disponibili come parte dell'argomento `where`: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### Query Time-travel -È possibile effetuare query dello stato delle entità non solo per l'ultimo blocco, che è quello predefinito, ma anche per un blocco nel passato. Il blocco in cui deve avvenire una query può essere specificato dal suo numero di blocco o dal suo hash, includendo un argomento `block` nei campi di livello superiore delle query. +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. 
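For instance (a minimal sketch reusing the `Token` entity from the earlier examples), pinning a collection query to a block number only takes that extra argument; the dedicated examples below show the same pattern on `Challenge` entities:

```graphql
{
  tokens(first: 5, block: { number: 8000000 }) {
    id
    owner
  }
}
```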
The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. @@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Questa query restituirà le entità `Challenge` e le entità `Application` ad esse associate, così come esistevano direttamente dopo l'elaborazione del blocco numero 8.000.000. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### Esempio @@ -325,26 +325,26 @@ Questa query restituirà le entità `Challenge` e le entità `Application` ad es } ``` -Questa query restituirà le entità `Challenge` e le entità `Application` ad esse associate, così come esistevano direttamente dopo l'elaborazione del blocco con l'hash indicato. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### Query di ricerca fulltext -I campi di ricerca fulltext forniscono un'API di ricerca testuale espressiva che può essere aggiunta allo schema del subgraph e personalizzata. Fare riferimento a [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) per aggiungere la ricerca fulltext al subgraph. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -Le query di ricerca fulltext hanno un campo obbligatorio, `text`, per fornire i termini di ricerca. In questo campo di ricerca `text` sono disponibili diversi operatori speciali per il fulltext. +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Operatori di ricerca fulltext: -| Simbolo | Operatore | Descrizione | -| --- | --- | --- | -| `&` | `And` | Per combinare più termini di ricerca in un filtro per le entità che includono tutti i termini forniti | -| | | `Or` | Le query con più termini di ricerca separati dall'operatore Or restituiranno tutte le entità con una corrispondenza tra i termini forniti | -| `<->` | `Follow by` | Specifica la distanza tra due parole. | -| `:*` | `Prefisso` | Utilizzare il termine di ricerca del prefisso per trovare le parole il cui prefisso corrisponde (sono richiesti 2 caratteri.) | +| Simbolo | Operatore | Descrizione | +| ------- | ----------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Per combinare più termini di ricerca in un filtro per le entità che includono tutti i termini forniti | +| | | `Or` | Le query con più termini di ricerca separati dall'operatore Or restituiranno tutte le entità con una corrispondenza tra i termini forniti | +| `<->` | `Follow by` | Specifica la distanza tra due parole. 
| +| `:*` | `Prefix` | Utilizzare il termine di ricerca del prefisso per trovare le parole il cui prefisso corrisponde (sono richiesti 2 caratteri.) | #### Esempi -Utilizzando l'operatore `or`, questa query filtrerà le entità blog con variazioni di "anarchism" o "crumpet" nei loro campi fulltext. +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. ```graphql { @@ -357,7 +357,7 @@ Utilizzando l'operatore `or`, questa query filtrerà le entità blog con variazi } ``` -L'operatore `follow by` specifica le parole a una distanza specifica nei documenti fulltext. La seguente query restituirà tutti i blog con variazioni di "decentralize" seguite da "philosophy" +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -385,11 +385,11 @@ Combinare gli operatori fulltext per creare filtri più complessi. Con un operat ### Validazione -Graph Node implementa la validazione [basata sulle specifiche](https://spec.graphql.org/October2021/#sec-Validation) delle query GraphQL che riceve utilizzando [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), che si basa sull'[implementazione di riferimento di graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). Le query che non rispettano una regola di validazione vengono segnalate con un errore standard - per saperne di più, visitare le [specifiche di GraphQL](https://spec.graphql.org/October2021/#sec-Validation). +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### Entità -Tutti i tipi GraphQL con direttive `@entity` nello schema saranno trattati come entità e devono avere un campo `ID`. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **Nota:** Attualmente, tutti i tipi nello schema devono avere una direttiva `@entity`. 
In futuro, i tipi senza direttiva `@entity` saranno trattati come oggetti valore, ma questo non è ancora supportato. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### Metadati del Subgraph -Tutti i subgraph hanno un oggetto `_Meta_` autogenerato, che fornisce accesso ai metadati del subgraph. Questo oggetto può essere interrogato come segue: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ Tutti i subgraph hanno un oggetto `_Meta_` autogenerato, che fornisce accesso ai Se viene fornito un blocco, i metadati si riferiscono a quel blocco, altrimenti viene utilizzato il blocco indicizzato più recente. Se fornito, il blocco deve essere successivo al blocco iniziale del subgraph e inferiore o uguale al blocco indicizzato più recente. -`deployment` è un ID unico, corrispondente al CID IPFS del file `subgraph.yaml`. +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. -`block` fornisce informazioni sull'ultimo blocco (tenendo conto di eventuali vincoli di blocco passati a `_meta`): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash: l'hash del blocco - numero: il numero del blocco - timestamp: il timestamp del blocco, se disponibile (attualmente è disponibile solo per i subgraph che indicizzano le reti EVM) -`hasIndexingErrors` è un booleano che identifica se il subgraph ha incontrato errori di indicizzazione in qualche blocco passato +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From a571d9310c963ed70fabda8b5857cff0570f1e32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:50 -0500 Subject: [PATCH 0244/1534] New translations graphql-api.mdx (Japanese) --- .../ja/subgraphs/querying/graphql-api.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/ja/subgraphs/querying/graphql-api.mdx b/website/src/pages/ja/subgraphs/querying/graphql-api.mdx index 1da73d118822..61502d11d7bd 100644 --- a/website/src/pages/ja/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ja/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### 例 -スキーマで定義された 1 つの`Token`エンティティに対するクエリ: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -すべての `Token` エンティティをクエリします。 +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### ネストされたエンティティの並べ替えの例 -グラフ ノード [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) の時点で、エンティティを並べ替えることができますネストされたエンティティに基づいています。 +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. 
The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> 現在、`@entity` および `@derivedFrom` フィールドで、1 レベルの深い `String` または `ID` 型で並べ替えることができます。残念ながら、[1 レベルの深さのエンティティのインターフェイスによる並べ替え](https://github.com/graphprotocol/graph-node/pull/4058)、配列およびネストされたエンティティであるフィールドによる並べ替えは、まだサポートされていません。 +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### ページネーション @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### `first` を使用した例 +#### Example using `first` 最初の 10 個のトークンを照会します。 @@ -101,11 +101,11 @@ When querying a collection, it's best to: } ``` -コレクションの途中にあるエンティティのグループをクエリするには、`skip` パラメータを `first` パラメータと組み合わせて使用​​して、最初から指定された数のエンティティをスキップできます。コレクションの。 +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### `first` と `skip` を使用した例 +#### Example using `first` and `skip` -コレクションの先頭から 10 桁ずれた 10 個の `Token` エンティティをクエリします。 +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ When querying a collection, it's best to: } ``` -#### `first` と `id_ge` を使用した例 +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### `where` を使用した例 +#### Example using `where` -`failed` 結果のクエリ チャレンジ: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r } ``` -値の比較には、`_gt`、`_lte` などのサフィックスを使用できます。 +You can use suffixes like `_gt`, `_lte` for value comparison: #### 範囲フィルタリングの例 @@ -184,7 +184,7 @@ You can also filter entities that were updated in or after a specified block wit #### ネストされたエンティティ フィルタリングの例 -`_` サフィックスが付いたフィールドでは、ネストされたエンティティに基づくフィルタリングが可能です。 +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. 
これは、子レベルのエンティティが指定された条件を満たすエンティティのみをフェッチする場合に役立ちます。 @@ -202,9 +202,9 @@ You can also filter entities that were updated in or after a specified block wit #### 論理演算子 -Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) の時点で、複数のグループをグループ化できます同じ `where` 引数で `and` または `or` 演算子を使用して複数の基準に基づいて結果をフィルタリングします。 +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### `AND` 演算子 +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. @@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **シンタックス シュガー:** コンマで区切られた部分式を渡すことで `and` 演算子を削除することで、上記のクエリを簡素化できます。 +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -234,7 +234,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### `OR` 演算子 +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **注意**:クエリを構築する際には、`または`演算子の使用によるパフォーマンスへの影響を考慮することが重要です。`または`は検索結果を広げるための便利なツールとなり得ますが、重要なコストも伴います。`または`の主な問題の1つは、クエリの遅延を引き起こす可能性があることです。これは、`または`がデータベースに複数のインデックスをスキャンする必要があるため、時間のかかるプロセスとなるからです。これらの問題を避けるために、開発者は可能な限りまたはの代わりにかつ演算子を使用することが推奨されます。これにより、より正確なフィルタリングが可能となり、より高速で正確なクエリが実行できるでしょう。 +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### すべてのフィルター @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> 一部の接尾辞は、特定のタイプでのみサポートされていることに注意してください。たとえば、`Boolean` は `_not`、`_in`、および `_not_in` のみをサポートしますが、`_` はサポートしません。オブジェクト型とインターフェイス型でのみ使用できます。 +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -さらに、次のグローバル フィルターを `where` 引数の一部として使用できます。 +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### タイムトラベル クエリ -デフォルトである最新のブロックだけでなく、過去の任意のブロックについてもエンティティの状態を照会できます。クエリが発生するブロックは、クエリのトップレベル フィールドに `block` 引数を含めることで、ブロック番号またはブロック ハッシュのいずれかで指定できます。 +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. 
The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. @@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -このクエリは、ブロック番号 8,000,000 を処理した直後に存在していた Challenge エンティティとそれに関連する Application エンティティを返します。 +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### 例 @@ -325,26 +325,26 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -このクエリは `Challenge` エンティティとそれに関連付けられた `Application` エンティティを返します。これは、指定されたハッシュでブロックを処理した直後に存在していたためです。 +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### 全文検索クエリ -フルテキスト検索クエリフィールドは、サブグラフスキーマに追加してカスタマイズできる、表現力豊かなテキスト検索 API を提供します。サブグラフにフルテキスト検索を追加するには、「[Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields)」を参照してください。 +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -全文検索クエリには、検索語を提供するための必須フィールド `text` が 1 つあります。この `text` 検索フィールドでは、いくつかの特別な全文演算子を使用できます。 +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. 全文検索演算子: -| シンボル | オペレーター | 説明書き | -| --- | --- | --- | -| `&` | `と` | 複数の検索語を組み合わせて、指定したすべての検索語を含むエンティティをフィルタリングします。 | -| | | `Or` | 複数の検索語をオペレーターで区切って検索すると、指定した語のいずれかにマッチするすべてのエンティティが返されます。 | -| `<->` | `Follow by` | 2 つの単語の間の距離を指定します。 | -| `:*` | `プレフィックス` | プレフィックス検索語を使って、プレフィックスが一致する単語を検索します(2 文字必要) | +| シンボル | オペレーター | 説明書き | +| ------ | ----------- | --------------------------------------------------------- | +| `&` | `And` | 複数の検索語を組み合わせて、指定したすべての検索語を含むエンティティをフィルタリングします。 | +| | | `Or` | 複数の検索語をオペレーターで区切って検索すると、指定した語のいずれかにマッチするすべてのエンティティが返されます。 | +| `<->` | `Follow by` | 2 つの単語の間の距離を指定します。 | +| `:*` | `Prefix` | プレフィックス検索語を使って、プレフィックスが一致する単語を検索します(2 文字必要) | #### 例 -`or` 演算子を使用すると、このクエリはフルテキスト フィールドに「anarchism」または「crumpet」のいずれかのバリエーションを持つブログ エンティティにフィルター処理されます。 +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. ```graphql { @@ -357,7 +357,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -`follow by` 演算子は、フルテキスト ドキュメント内で特定の距離だけ離れた単語を指定します。次のクエリは、"decentralize" の後に "philosophy" が続くすべてのブログを返します。 +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. 
The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -385,11 +385,11 @@ The result of such a query will not change over time, i.e., querying at a certai ### 認証 -グラフ ノードは、受信した GraphQL クエリの [仕様ベース](https://spec.graphql.org/October2021/#sec-Validation) の検証を実装します[graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules),これはに基づいています[graphql-js リファレンス実装](https://github.com/graphql/graphql-js/tree/main/src/validation).検証ルールに失敗したクエリは、標準エラーで失敗します - にアクセスしてください詳細については、[GraphQL 仕様](https://spec.graphql.org/October2021/#sec-Validation)をご覧ください。 +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. ## スキーマ -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### エンティティ -スキーマ内の `@entity` ディレクティブを持つすべての GraphQL タイプはエンティティとして扱われ、 `ID` フィールドが必要です。 +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **注:** 現在、スキーマ内のすべてのタイプに `@entity` ディレクティブが必要です。将来的には、`@entity` ディレクティブのない型を値オブジェクトとして扱いますが、これはまだサポートされていません。 +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### サブグラフ メタデータ -すべてのサブグラフには、サブグラフ メタデータへのアクセスを提供する、自動生成された `_Meta_` オブジェクトがあります。これは、次のように照会できます。 +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ブロックが提供されている場合、メタデータはそのブロックのものであり、そうでない場合は、最新のインデックス付きブロックが使用されます。提供される場合、ブロックはサブグラフの開始ブロックの後にあり、最後にインデックス付けされたブロック以下でなければなりません。 -`deployment` は、`subgraph.yaml` ファイルの IPFS CID に対応する一意の ID です。 +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
-`block` は、最新のブロックに関する情報を提供します (`_meta` に渡されたブロック制約を考慮します): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash: ブロックのハッシュ - number: ブロック番号 - timestamp: 可能であれば、ブロックのタイムスタンプ (これは現在、EVMネットワークのインデックスを作成するサブグラフでのみ利用可能) -`hasIndexingErrors` は、サブグラフが過去のブロックでインデックス作成エラーに遭遇したかどうかを識別するブール値です +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From 6f196fbb983bdf765387a51023986afd4e1596ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:51 -0500 Subject: [PATCH 0245/1534] New translations graphql-api.mdx (Korean) --- .../pages/ko/subgraphs/querying/graphql-api.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ko/subgraphs/querying/graphql-api.mdx b/website/src/pages/ko/subgraphs/querying/graphql-api.mdx index f9176794ae2b..d93f73706ec6 100644 --- a/website/src/pages/ko/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ko/subgraphs/querying/graphql-api.mdx @@ -29,7 +29,7 @@ Query for a single `Token` entity defined in your schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. Query all `Token` entities: @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. 
The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). From b83b4e3113d91d6e7285c0b0808e14d2d8912dca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:52 -0500 Subject: [PATCH 0246/1534] New translations graphql-api.mdx (Dutch) --- .../pages/nl/subgraphs/querying/graphql-api.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/nl/subgraphs/querying/graphql-api.mdx b/website/src/pages/nl/subgraphs/querying/graphql-api.mdx index f9176794ae2b..d93f73706ec6 100644 --- a/website/src/pages/nl/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/nl/subgraphs/querying/graphql-api.mdx @@ -29,7 +29,7 @@ Query for a single `Token` entity defined in your schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. Query all `Token` entities: @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). 
From 687d4f303a60c7e98e35ada3ee664514e0b2d3f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:53 -0500 Subject: [PATCH 0247/1534] New translations graphql-api.mdx (Polish) --- .../pages/pl/subgraphs/querying/graphql-api.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/pl/subgraphs/querying/graphql-api.mdx b/website/src/pages/pl/subgraphs/querying/graphql-api.mdx index f9176794ae2b..d93f73706ec6 100644 --- a/website/src/pages/pl/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/pl/subgraphs/querying/graphql-api.mdx @@ -29,7 +29,7 @@ Query for a single `Token` entity defined in your schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. Query all `Token` entities: @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). 
From ed3fe93e9ae22ff072fba368ac97a88300cb1e10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:54 -0500 Subject: [PATCH 0248/1534] New translations graphql-api.mdx (Portuguese) --- .../pt/subgraphs/querying/graphql-api.mdx | 116 +++++++++--------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/website/src/pages/pt/subgraphs/querying/graphql-api.mdx b/website/src/pages/pt/subgraphs/querying/graphql-api.mdx index 84f85a89bf44..41d0c201404a 100644 --- a/website/src/pages/pt/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/pt/subgraphs/querying/graphql-api.mdx @@ -2,23 +2,23 @@ title: API GraphQL --- -Learn about the GraphQL Query API used in The Graph. +Aprenda sobre a API de Queries da GraphQL, usada no The Graph. -## What is GraphQL? +## O Que é a GraphQL? -[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. +A [GraphQL](https://graphql.org/learn/) é uma linguagem de queries para APIs e um sistema de tempo de execução (runtime) para executar esses queries, com os seus dados já existentes. O The Graph usa a GraphQL para fazer queries em subgraphs. -To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). +Para entender melhor o papel maior da GraphQL, veja [como desenvolver](/subgraphs/developing/introduction/) e [criar um subgraph](/developing/creating-a-subgraph/). -## Queries with GraphQL +## Queries com a GraphQL -In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. +No seu schema de subgraph, você definirá tipos chamados `Entities` ("Entidades"). Para cada tipo `Entity`, campos `entity` e `entities` serão gerados no tipo de nível superior `Query`. -> Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. +> Observação: `query` não precisa ser incluído no topo do query `graphql` enquanto usar o The Graph. ### Exemplos -Um query para uma única entidade `Token` definida no seu schema: +Query para uma entidade `Token` definida no seu schema: ```graphql { @@ -29,9 +29,9 @@ Um query para uma única entidade `Token` definida no seu schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Nota: Ao fazer um query para uma única entidade, é obrigatório um campo `id`; este deve ser escrito como string. -Consulte todas as entidades `Token`: +Faça um query de todas as entidades `Token`: ```graphql { @@ -44,10 +44,10 @@ Consulte todas as entidades `Token`: ### Organização -When querying a collection, you may: +Quando fizer um query de uma coleção, você poderá: -- Use the `orderBy` parameter to sort by a specific attribute. -- Use the `orderDirection` to specify the sort direction, `asc` for ascending or `desc` for descending. +- Use o parâmetro `orderBy` para organizar os resultados por atributos específicos. +- Use `orderDirection` para especificar a direção da organização; `asc` para ordem crescente ou `desc` para decrescente. 
#### Exemplo @@ -62,9 +62,9 @@ When querying a collection, you may: #### Exemplo para organização de entidade aninhada -Desde o Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0), as entidades podem ser organizadas com base nas entidades aninhadas. +Com o lançamento da versão [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) do Graph Node, as entidades podem ser organizadas com base nas entidades aninhadas. -The following example shows tokens sorted by the name of their owner: +No seguinte exemplo, organizamos os tokens pelo nome do proprietário: ```graphql { @@ -77,18 +77,18 @@ The following example shows tokens sorted by the name of their owner: } ``` -> Atualmente, pode organizar por tipos `String` or `ID` de nível único em campos `@entity` e `@derivedFrom`. Infelizmente, [a organização por interfaces em entidades de nível único](https://github.com/graphprotocol/graph-node/pull/4058), que é a organização por campos que são arranjos e entidades aninhadas, ainda não tem apoio. +> Atualmente, pode-se organizar por tipos `String` or `ID` de nível único em campos `@entity` e `@derivedFrom`. Infelizmente, ainda não há apoio para [organização por interfaces em entidades de nível único](https://github.com/graphprotocol/graph-node/pull/4058), que é a organização por campos que são arranjos e entidades aninhadas. ### Paginação -When querying a collection, it's best to: +Quando fizer um query de uma coleção, recomendamos: -- Use the `first` parameter to paginate from the beginning of the collection. - - The default sort order is by `ID` in ascending alphanumeric order, **not** by creation time. -- Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. -- Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. +- Usar o parâmetro `first` para paginar a partir do início da coleção. + - A ordem padrão de organização é por `ID` em ordem alfanumérica crescente, **não** por data e hora de criação. +- Usar o parâmetro `skip` para pular entidades e paginar. Por exemplo: `first:100` mostra as primeiras 100 entidades e `first:100, skip:100` mostra as próximas 100. +- Evitar usar valores `skip` em queries, já que o desempenho destes normalmente não é bom. Para resgatar um grande volume de itens, recomendamos paginar entidades com base num atributo, conforme demonstrado no exemplo acima. -#### Exemplo com o `first` +#### Exemplo com `first` Consulte os 10 primeiros tokens: @@ -101,11 +101,11 @@ Consulte os 10 primeiros tokens: } ``` -Para queries sobre grupos de entidades no meio de uma coleção, o parâmetro `skip` serve em conjunto com o parâmetro `first` para pular um número especificado de entidades, a começar no início da coleção. +Para fazer queries sobre grupos de entidades no meio de uma coleção, o parâmetro `skip` pode ser usado em conjunto com o parâmetro `first` para pular um número especificado de entidades, a começar no início da coleção. 
#### Exemplo com `first` e `skip` -Consulte 10 entidades `Token`, deslocada por 10 posições do começo da coleção: +Faça query de 10 entidades Token, deslocadas por 10 posições do começo da coleção: ```graphql { @@ -118,7 +118,7 @@ Consulte 10 entidades `Token`, deslocada por 10 posições do começo da coleç #### Exemplo com `first` e `id_ge` -If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: +Se um cliente precisar retirar um grande volume de entidades, é muito mais eficiente basear queries num atributo e filtrar pelo mesmo. Por exemplo, um cliente retiraria um número grande de tokens com este query: ```graphql query manyTokens($lastID: String) { @@ -129,16 +129,16 @@ query manyTokens($lastID: String) { } ``` -The first time, it would send the query with `lastID = ""`, and for subsequent requests it would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. +Na primeira vez, ele enviaria o query com `lastID = ""`, e nas solicitações seguintes, configuraria `lastID` no atributo `id` da última entidade no pedido anterior. Este método será mais eficiente do que usar valores `skip` crescentes. ### Filtragem -- You can use the `where` parameter in your queries to filter for different properties. -- You can filter on multiple values within the `where` parameter. +- O parâmetro `where` pode ser usado nos seus queries para filtrar propriedades diferentes. +- Vários valores podem ser filtrados dentro do parâmetro `where`. #### Exemplo com `where` -Faça um query sobre o `challenges` com o resultado `failed`: +Faça um query sobre desafios com o resultado `failed` (falha): ```graphql { @@ -152,7 +152,7 @@ Faça um query sobre o `challenges` com o resultado `failed`: } ``` -Pode usar sufixos como `_gt`, `_lte` para comparar valores: +É possível usar sufixos como `_gt`, ou `_lte`, para comparar valores: #### Exemplo para filtragem de alcance @@ -168,7 +168,7 @@ Pode usar sufixos como `_gt`, `_lte` para comparar valores: #### Exemplo para filtragem de blocos -You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. +Também dá para filtrar entidades atualizadas dentro de, ou depois de, um bloco específico com `_change_block(number_gte: Int)`. Isto pode servir caso mire retirar apenas entidades que mudaram, por exemplo, desde a última vez que você pesquisou. Também pode ser bom investigar ou debugar como as entidades mudam no seu subgraph (se combinado com um filtro de blocos, pode isolar apenas entidades que mudaram em um bloco específico). @@ -184,7 +184,7 @@ Isto pode servir caso mire retirar apenas entidades que mudaram, por exemplo, de #### Exemplo para filtragem de entidade ninhada -A filtragem na base de entidades ninhadas é possível nos campos com o sufixo `_`. +É possível usar filtros com base em entidades aninhadas nos campos com o sufixo `_`. Isto é bom caso mire retirar apenas entidades cujas entidades de nível-filho correspondem às condições requeridas. 
@@ -202,11 +202,11 @@ Isto é bom caso mire retirar apenas entidades cujas entidades de nível-filho c #### Operadores lógicos -Desde o Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0), pode agrupar vários parâmetros no mesmo argumento `where` com operadores `and` ou `or`, para filtrar resultados com base em mais de um critério. +Com o lançamento da versão [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) do Graph Node, é possível agrupar vários parâmetros no mesmo argumento `where`, com os operadores `and` ou `or`, para filtrar resultados com base em mais de um critério. ##### Operador `AND` -The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. +O exemplo a seguir filtra por desafios com o `outcome` ("resultado") `succeeded` ("sucesso"), e `number` ("número") maior que ou igual a `100`. ```graphql { @@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Açúcar sintático**: pode simplificar a consulta acima ao retirar o operador `and`, com o passe de uma subexpressão separada por vírgulas. +> **Açúcar sintático:** O query acima pode ser simplificado ao retirar o operador `and`, com o passe de uma sub-expressão separada por vírgulas. > > ```graphql > { @@ -236,7 +236,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num ##### Operador `OR` -The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. +O exemplo a seguir filtra desafios com o `outcome` `succeeded`, e o `number` maior que ou igual a `100`. ```graphql { @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Nota:** Ao construir consultas, é importante considerar o impacto do desempenho do operador `or`. Enquanto o `or` pode ser útil para ampliar os resultados da busca, ele também pode ter custos significativos. Um dos maiores problemas com o `or` é que pode reduzir a velocidade das consultas. Isto é porque o `or` exige que o banco de dados escaneie por vários indexes, o que pode demorar muito. Para evitar estes problemas, recomendamos que os programadores usem operadores `and` em vez de `or` sempre que possível. Isto retorna filtragens mais precisas, e pode levar a consultas mais rápidas e confiáveis. +> **Observação**: Ao construir queries, é importante considerar o impacto do desempenho do operador `or`. O `or` pode ser útil para ampliar os resultados da busca, mas também pode ter custos significativos. Um dos maiores problemas com o `or` é que pode desacelerar os queries. Isto é porque o `or` exige que o banco de dados escaneie através de vários índices, o que pode demorar muito. Para evitar estes problemas, recomendamos que os programadores usem operadores `and` em vez de `or` sempre que possível. Isto retorna filtragens mais precisas, e pode levar a queries mais rápidos e confiáveis. #### Todos os Filtros @@ -289,11 +289,11 @@ _change_block(number_gte: Int) ### Consultas de viagem no tempo -Pode consultar o estado das suas entidades não só para o bloco mais recente, que é o padrão, mas também para um bloco arbitrário no passado. O bloco em que acontece um query pode ser especificado pelo seu número, ou pelo seu hash, ao incluir um argumento `block` nos campos de nível alto de query. 
+É possível solicitar o estado das suas entidades não só para o bloco mais recente, que é o padrão, mas também para um bloco arbitrário no passado. O bloco em que acontece um query pode ser especificado pelo seu número, ou pelo seu hash, ao incluir um argumento `block` nos campos de nível superior de query. -The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. +O resultado de um query assim não mudará com o tempo; por exemplo, queries num certo bloco passado retornarão o mesmo resultado, independente de quando ele for executado. Exceto que, se fizer um query num bloco muito perto do topo da chain, o resultado pode mudar se aquele bloco acabar ausente da chain principal e a chain for reorganizada. Quando um bloco puder ser considerado final, o resultado do query não mudará. -> Note: The current implementation is still subject to certain limitations that might violate these guarantees. The implementation can not always tell that a given block hash is not on the main chain at all, or if a query result by a block hash for a block that is not yet considered final could be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. +> Observação: a implementação atual ainda está sujeita a certas limitações que podem violar estas garantias. A implementação nem sempre percebe que um hash de bloco não está na chain principal, ou que um query por hash de bloco retorna um bloco que não pode ser considerado final, mas que pode ser influenciado por uma reorganização de bloco executada concorrente com a solicitação. Elas não afetam os resultados de queries por hash de bloco quando o bloco é final e tem sua presença conhecida na chain principal. [Este inquérito](https://github.com/graphprotocol/graph-node/issues/1405) explica estas limitações em detalhes. #### Exemplo @@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Esta consulta retornará entidades `Challenge` e as entidades `Application` associadas, já que existiram diretamente após processar o bloco de número 8.000.000. +Este query retornará entidades `Challenge` e as suas entidades `Application` associadas, já que existiram diretamente após processar o bloco de número 8.000.000. #### Exemplo @@ -325,26 +325,26 @@ Esta consulta retornará entidades `Challenge` e as entidades `Application` asso } ``` -Esta consulta retornará entidades `Challenge` e as entidades `Application` associadas, já que existiram diretamente após processar o bloco com o hash dado. +Este query retornará entidades `Challenge` e as suas entidades `Application` associadas, já que existiram diretamente após processar o bloco com o hash indicado. ### Consultas de Busca Fulltext -Campos de busca fulltext fornecem uma API de busca de texto expressiva, que pode ser adicionada e personalizada ao schema do subgraph. 
Refira ao [Definindo Campos de Busca Fulltext](/developing/creating-a-subgraph/#defining-fulltext-search-fields) para adicionar buscas fulltext ao seu subgraph. +Campos de busca em full-text fornecem uma API de busca de texto expressiva, que pode ser adicionada e personalizada ao schema do subgraph. Para adicionar buscas em full-text ao seu subgraph, veja [Como Definir Campos de Busca em Full-Text](/developing/creating-a-subgraph/#defining-fulltext-search-fields). -Buscas fulltext têm um campo obrigatório, `text`, para ofertar termos de busca. Vários operadores especiais de fulltext estão disponíveis para uso neste campo de busca `text`. +Buscas em full-text têm um campo obrigatório, `text`, para ofertar termos de busca. Vários operadores especiais de full-text estão disponíveis para uso neste campo de busca `text`. Operadores de busca fulltext: -| Símbolo | Operador | Descrição | -| --- | --- | --- | -| `&` | `And` | Para combinar múltiplos termos de busca num filtro para entidades que incluem todos os termos fornecidos | -| | | `Or` | Consultas com vários termos de busca separados pelo operador `or` retornarão todas as entidades com uma correspondência de qualquer termo providenciado | -| `<->` | `Follow by` | Especifica a distância entre duas palavras. | -| `:*` | `Prefix` | Use o prefixo para encontrar palavras que correspondem a tal prefixo (2 caracteres necessários.) | +| Símbolo | Operador | Descrição | +| ------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | Para combinar múltiplos termos de busca num filtro para entidades que incluem todos os termos fornecidos | +| | | `Or` | Consultas com vários termos de busca separados pelo operador or retornarão todas as entidades com uma correspondência de qualquer termo providenciado | +| `<->` | `Follow by` | Especifica a distância entre duas palavras. | +| `:*` | `Prefix` | Use o prefixo para encontrar palavras que correspondem a tal prefixo (2 caracteres necessários.) | #### Exemplos -Ao usar o operador `or`, esta consulta filtrará para entidades com variações de "anarchism" ou "crumpet" nos seus campos fulltext. +Com o operador `or`, esta consulta filtrará para entidades de blog com variações de "anarchism" ou "crumpet" nos seus campos fulltext. ```graphql { @@ -357,7 +357,7 @@ Ao usar o operador `or`, esta consulta filtrará para entidades com variações } ``` -O operador `follow by` especifica palavras separadas por uma distância específica nos documentos fulltext. A seguinte consulta retornará todos os blogs com variações de "decentralize" ("descentralizar") seguido por "philosophy" ("filosofia") +O operador `follow by` especifica palavras separadas por uma distância específica nos documentos fulltext. A query a seguir retornará todos os blogs com variações de "decentralize" ("descentralizar") seguido por "philosophy" ("filosofia") ```graphql { @@ -385,25 +385,25 @@ Combine operadores de fulltext para fazer filtros mais complexos. Com um operado ### Validação -O Graph Node implementa uma validação [baseada em especificação](https://spec.graphql.org/October2021/#sec-Validation) dos queries GraphQL que recebe usando o [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), que tem base na [implementação de referência do graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). 
Queries que não seguem uma regra de validação respondem com um erro padrão — visite a [spec do GraphQL](https://spec.graphql.org/October2021/#sec-Validation) para saber mais. +O Graph Node implementa validações [baseadas em especificação](https://spec.graphql.org/October2021/#sec-Validation) dos queries da GraphQL que recebe, através do [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules) — que tem base na [referência de implementação graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries que não aderem a uma regra de validação fazem isso com um erro comum — para saber mais, visite as [especificações da GraphQL](https://spec.graphql.org/October2021/#sec-Validation). ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +O schema dos seus dataSources — por exemplo, os tipos de entidade, valores, e conexões que podem ser solicitados num query — é definido através da [Linguagem de Definição de Interface da GraphQL (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). +Os schemas GraphQL geralmente definem tipos de origem para `queries` (solicitações), `subscriptions` (inscrições) e `mutations` (mutações). O The Graph só apoia `queries`. A origem `Query` para o seu subgraph é gerada automaticamente a partir do schema GraphQL incluído no [manifest do seu subgraph](/developing/creating-a-subgraph/#components-of-a-subgraph). -> Note: Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. +> Nota: A nossa API não expõe mutações, porque esperamos que os programadores emitam transações diretamente dos seus aplicativos perante a blockchain subjacente. ### Entidades Todos os tipos GraphQL com diretivos `@entity` no seu schema serão tratados como entidades, e devem ter um campo `ID`. -> **Nota:** Atualmente, todos os tipos no seu schema devem ter um diretivo `@entity`. No futuro, trataremos tipos sem um diretivo `@entity` como objetos de valor, mas ainda não há apoio a isto. +> **Observação:** Atualmente, todos os tipos no seu schema devem ter um diretivo `@entity`. No futuro, trataremos tipos sem um diretivo `@entity` como objetos de valor, mas ainda não há apoio a isto. ### Metadados de Subgraph -Todos os subgraphs devem ter um objeto `_Meta_` gerado automaticamente, que permite acesso aos metadados do subgraph. Isto pode ser consultado num query como o seguinte: +Todos os subgraphs devem ter um objeto `_Meta_` gerado automaticamente, que permite acesso aos metadados do subgraph. 
Isto pode ser solicitado num query como o query mostrado a seguir: ```graphQL { From 8a8abe4ecbf3338cde5abfc4b418731709fc12f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:55 -0500 Subject: [PATCH 0249/1534] New translations graphql-api.mdx (Russian) --- .../ru/subgraphs/querying/graphql-api.mdx | 86 +++++++++---------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/website/src/pages/ru/subgraphs/querying/graphql-api.mdx b/website/src/pages/ru/subgraphs/querying/graphql-api.mdx index 64814ee92016..47d25a088eca 100644 --- a/website/src/pages/ru/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ru/subgraphs/querying/graphql-api.mdx @@ -4,7 +4,7 @@ title: API GraphQL Learn about the GraphQL Query API used in The Graph. -## What is GraphQL? +## Что такое GraphQL? [GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Примеры -Запрос на один объект `Token`, определенный в Вашей схеме: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -Запрос всех объектов `Токен`: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### Пример сортировки вложенных объектов -Начиная с Graph Node, объекты [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) можно сортировать на основе вложенных содержаний. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> В настоящее время Вы можете осуществлять сортировку по одно уровневым типам `String` или `ID` в полях `@entity` и `@derivedFrom`. К сожалению, [сортировка по интерфейсам на одно уровневых структурах](https://github.com/graphprotocol/graph-node/pull/4058), сортировка по полям, которые являются массивами и вложенными объектами, еще не поддерживается. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### Пагинация @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. 
-#### Пример использования `first` +#### Example using `first` Запрос первых 10 токенов: @@ -101,11 +101,11 @@ When querying a collection, it's best to: } ``` -Чтобы запросить группы объектов в середине коллекции, параметр `skip` можно использовать в сочетании с параметром `first`, чтобы пропустить указанное количество объектов, начиная с начала коллекции. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### Пример использования `first` и `skip` +#### Example using `first` and `skip` -Запрос 10 объектов `Token`, смещенных на 10 позиций от начала коллекции: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ When querying a collection, it's best to: } ``` -#### Пример использования `first` и `id_ge` +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### Пример использования `where` +#### Example using `where` -Вызовы запросов с результатом `failed`: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r } ``` -Вы можете использовать такие суффиксы, как `_gt`, `_lte` для сравнения значений: +You can use suffixes like `_gt`, `_lte` for value comparison: #### Пример фильтрации диапазона @@ -184,7 +184,7 @@ You can also filter entities that were updated in or after a specified block wit #### Пример фильтрации вложенных объектов -Фильтрация по вложенным объектам возможна в полях с суффиксом `_`. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. Это может быть полезно, если Вы хотите получать только объекты, у которых объекты дочернего уровня удовлетворяют заданным условиям. @@ -202,9 +202,9 @@ You can also filter entities that were updated in or after a specified block wit #### Логические операторы -Начиная с Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0), Вы можете группировать несколько параметров в одном и том же аргументе `where` с использованием операторов `and` или `or` для фильтрации результатов на основе более чем одного критерия. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### Оператор `AND` +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. @@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Syntactic sugar:**. Вы можете упростить приведенный выше запрос, удалив оператор `and`, передав подвыражение, разделенное запятыми. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. 
> > ```graphql > { @@ -234,7 +234,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### Оператор `OR` +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Примечание**. При построении запросов важно учитывать влияние на производительность использования оператора `or`. Хотя `or` могут быть полезными инструментами для расширения результатов поиска, они также могут повлечь значительные затраты. Одна из основных проблем с `or` заключается в том, что это может привести к замедлению запросов. Это связано с тем, что `or` требует от базы данных сканирования нескольких индексов, что может занять много времени. Чтобы избежать этих проблем, разработчикам рекомендуется использовать операторы and вместо или всякий раз, когда это возможно. Это обеспечивает более точную фильтрацию и может привести к более быстрым и точным запросам. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### Все фильтры @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> Обратите внимание, что некоторые суффиксы поддерживаются только для определенных типов. Например, `Boolean` поддерживает только `_not`, `_in` и `_not_in`, а `_` доступен только для типов объектов и интерфейсов. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -Кроме того, в качестве части аргумента `where` доступны следующие глобальные фильтры: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### Запросы на Time-travel -Вы можете запрашивать состояние своих объектов не только для последнего блока, который используется по умолчанию, но и для произвольного блока в прошлом. Блок, в котором должен выполняться запрос, можно указать либо по номеру блока, либо по его хэшу, включив аргумент `block` в поля верхнего уровня запросов. +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. 
@@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Этот запрос вернет объекты `Challenge` и связанные с ними объекты `Application` в том виде, в каком они существовали сразу после обработки блока номер 8 000 000. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### Пример @@ -325,26 +325,26 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Этот запрос вернет объекты `Challenge` и связанные с ними объекты `Application` в том виде, в каком они существовали сразу после обработки блока с заданным хешем. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### Полнотекстовые поисковые запросы -Поля запроса полнотекстового поиска предоставляют API-интерфейс содержательного текстового поиска, который можно добавить в схему субграфа и настроить. См. [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields), чтобы добавить полнотекстовый поиск в свой субграф. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -Запросы полнотекстового поиска имеют одно обязательное поле, `text`, для предоставления поисковых запросов. В этом поле поиска `text` можно использовать несколько специальных операторов полнотекстового поиска. +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Полнотекстовые поисковые операторы: -| Символ | Оператор | Описание | -| --- | --- | --- | -| `&` | `And` | Для объединения нескольких условий поиска в фильтр для объектов, которые включают все указанные условия | -| | | `Or` | Запросы с несколькими условиями поиска, разделенные оператором or, вернут все объекты, которые соответствуют любому из предоставленных условий | -| `<->` | `Follow by` | Укажите расстояние между двумя словами. | -| `:*` | `Prefix` | Используйте поисковый запрос по префиксу, чтобы найти слова с соответствующим префиксом (необходимо 2 символа) | +| Символ | Оператор | Описание | +| ------ | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Для объединения нескольких условий поиска в фильтр для объектов, которые включают все указанные условия | +| | | `Or` | Запросы с несколькими условиями поиска, разделенные оператором or, вернут все объекты, которые соответствуют любому из предоставленных условий | +| `<->` | `Follow by` | Укажите расстояние между двумя словами. | +| `:*` | `Prefix` | Используйте поисковый запрос по префиксу, чтобы найти слова с соответствующим префиксом (необходимо 2 символа) | #### Примеры -Используя оператор `or`, этот запрос отфильтрует объекты блога с вариантами «anarchism» или «crumpet» в их полнотекстовых полях. +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. 
```graphql { @@ -357,7 +357,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Оператор `follow by` указывает слова на определенном расстоянии друг от друга в полнотекстовых документах. Следующий запрос вернет все блоги с вариантами «decentralize», за которыми следует «philosophy» +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -385,11 +385,11 @@ The result of such a query will not change over time, i.e., querying at a certai ### Валидация -Graph Node реализует [на основе спецификаций](https://spec.graphql.org/October2021/#sec-Validation) проверку запросов GraphQL, которые получает с помощью [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), основанного на [референтная реализация graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). Запросы, которые не соответствуют правилу проверки, вызывают стандартную ошибку. Подробнее см. в [спецификации GraphQL](https://spec.graphql.org/October2021/#sec-Validation). +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. ## Схема -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### Объекты -Все типы GraphQL с директивами `@entity` в Вашей схеме будут рассматриваться как объекты и должны иметь поле `ID`. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **Примечание.** В настоящее время все типы в Вашей схеме должны иметь директиву `@entity`. В будущем мы будем рассматривать типы без директивы `@entity` как объекты значений, но это пока не поддерживается. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### Метаданные субграфа -Все субграфы имеют автоматически сгенерированный объект `_Meta_`, который обеспечивает доступ к метаданным субграфа. Запросить это можно следующим образом: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. 
This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` Если предоставлен блок, метаданные относятся к этому блоку, в противном случае используется последний проиндексированный блок. Если предоставляется блок, он должен быть после начального блока субграфа и меньше или равен последнему проиндексированному блоку. -`deployment` — это уникальный идентификатор, соответствующий CID IPFS файла `subgraph.yaml`. +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. -`block` предоставляет информацию о последнем блоке (с учетом всех ограничений блока, переданных в `_meta`): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash: хэш блока - number: номер блока - timestamp: временная метка блока, если она доступна (в настоящее время доступна только для субграфов, индексирующих сети EVM) -`hasIndexingErrors` — логическое значение, определяющее, обнаружил ли субграф ошибки индексации в каком-то предыдущем блоке +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From a7f33da2dd6032521526fccec782eeae3b73067b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:56 -0500 Subject: [PATCH 0250/1534] New translations graphql-api.mdx (Swedish) --- .../sv/subgraphs/querying/graphql-api.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/sv/subgraphs/querying/graphql-api.mdx b/website/src/pages/sv/subgraphs/querying/graphql-api.mdx index b3ce32a629e3..0ff440abcefb 100644 --- a/website/src/pages/sv/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/sv/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Exempel -Förfrågan efter en enda `Token` -entitet som är definierad i din schema: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ Förfrågan efter en enda `Token` -entitet som är definierad i din schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -Fråga alla `Token`-enheter: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### Exempel på sortering av nästlade entiteter -Från och med Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) kan entiteter sorteras på basis av nästlade entiteter. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> För närvarande kan du sortera efter `String`- eller `ID`-typer på en djup nivå i fälten `@entity` och `@derivedFrom`. Tyvärr stöds ännu inte [ sortering efter gränssnitt på en nivå djupa enheter](https://github.com/graphprotocol/graph-node/pull/4058) sortering efter fält som är matriser och kapslade enheter. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. 
Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### Paginering @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### Exempel med `first` +#### Example using `first` Fråga efter de första 10 tokens: @@ -101,11 +101,11 @@ Fråga efter de första 10 tokens: } ``` -För att söka efter grupper av enheter i mitten av en samling kan parametern `skip` användas tillsammans med parametern `first` för att hoppa över ett angivet antal enheter med start i början av samlingen. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### Exempel med `first` och `skip` +#### Example using `first` and `skip` -Fråga 10 `Token`-enheter, förskjutna med 10 platser från början av samlingen: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ Fråga 10 `Token`-enheter, förskjutna med 10 platser från början av samlingen } ``` -#### Exempel med `first` och `id_ge` +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### Exempel med `where` +#### Example using `where` -Fråga utmaningar med `failed` resultat: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ Fråga utmaningar med `failed` resultat: } ``` -Du kan använda suffix som `_gt`, `_lte` för värdejämförelse: +You can use suffixes like `_gt`, `_lte` for value comparison: #### Exempel på filtrering av intervall @@ -184,7 +184,7 @@ Detta kan vara användbart om du bara vill hämta enheter som har ändrats, till #### Exempel på filtrering av inbäddade entiteter -Filtrering baserat på inbäddade entiteter är möjligt i fälten med `_` suffix. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. Detta kan vara användbart om du vill hämta endast entiteter vars entiteter på barnnivå uppfyller de angivna villkoren. @@ -202,9 +202,9 @@ Detta kan vara användbart om du vill hämta endast entiteter vars entiteter på #### Logiska operatorer -Från och med Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) kan du gruppera flera parametrar i samma `where`-argument med hjälp av `och` eller `eller` operatorer för att filtrera resultat baserat på mer än en kriterium. 
+As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### `OCH` Operator +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. @@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Syntactic sugar:** Du kan förenkla ovanstående fråga genom att ta bort `and`-operatorn och istället skicka ett underuttryck separerat med kommatecken. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -234,7 +234,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### `OR` Operatör +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Note**: När man konstruerar sökfrågor är det viktigt att ta hänsyn till hur användningen av operatorn `or` påverkar prestandan. Även om `or` kan vara ett användbart verktyg för att bredda sökresultaten, kan det också ha betydande kostnader. Ett av de största problemen med `or` är att det kan göra sökningar långsammare. Detta beror på att `or` kräver att databasen söker igenom flera index, vilket kan vara en tidskrävande process. För att undvika dessa problem rekommenderas att utvecklare använder och-operatorer istället för eller när det är möjligt. Detta möjliggör mer exakt filtrering och kan leda till snabbare och mer exakta frågor. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### Alla filter @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> Observera att vissa suffix endast stöds för specifika typer. Till exempel stöder `Boolean` endast `_not`, `_in` och `_not_in`, men `_` är endast tillgängligt för objekt- och gränssnittstyper. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -Dessutom är följande globala filter tillgängliga som en del av argumentet `where`: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### Tidsreseförfrågningar -Du kan förfråga tillståndet för dina enheter inte bara för den senaste blocken, som är standard, utan också för en godtycklig block i det förflutna. Blocket vid vilket en förfrågan ska ske kan specifieras antingen med dess blocknummer eller dess blockhash genom att inkludera ett `block`-argument i toppnivåfälten för förfrågningar. 
+You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. @@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Denna fråga kommer att returnera `Challenge`-enheter och deras tillhörande `Application`-enheter, så som de existerade direkt efter bearbetning av block nummer 8.000.000. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### Exempel @@ -325,26 +325,26 @@ Denna fråga kommer att returnera `Challenge`-enheter och deras tillhörande `Ap } ``` -Denna förfrågan kommer att returnera `Challenge`-entiteter och deras associerade `Application`-entiteter så som de fanns direkt efter bearbetning av blocket med den angivna hashen. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### Fulltextsökförfrågningar -Fält för fulltextsökning ger en uttrycksfull textsöknings-API som kan läggas till i undergrafens schema och anpassas. Hänvisa till [Definiera fält för fulltextsökning](/utveckling/skapa-en-undergraf/#definiera-fält-för-fulltextsökning) för att lägga till fulltextsökning i din undergraf. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -Fulltextsökförfrågningar har ett obligatoriskt fält, `text`, för att tillhandahålla söktermer. Flera specialiserade fulltextoperatorer finns tillgängliga att användas i detta `text`-sökfält. +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. Fulltextsökoperatorer: -| Symbol | Operatör | Beskrivning | -| --- | --- | --- | -| `&` | `Och` | För att kombinera flera söktermer till ett filter för entiteter som inkluderar alla de angivna termerna | -| | | `Eller` | Förfrågningar med flera söktermer separerade av ellipsen kommer att returnera alla entiteter med en matchning från någon av de angivna termerna | -| `<->` | `Följs av` | Ange avståndet mellan två ord. | -| `:*` | `Prefix` | Använd prefixsöktermen för att hitta ord vars prefix matchar (2 tecken krävs.) 
|
| Symbol | Operatör | Beskrivning |
| ------ | ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------- |
| `&` | `And` | För att kombinera flera söktermer till ett filter för entiteter som inkluderar alla de angivna termerna |
| | | `Or` | Förfrågningar med flera söktermer separerade av ellipsen kommer att returnera alla entiteter med en matchning från någon av de angivna termerna |
| `<->` | `Follow by` | Ange avståndet mellan två ord. |
| `:*` | `Prefix` | Använd prefixsöktermen för att hitta ord vars prefix matchar (2 tecken krävs.) |

#### Exempel

-Med hjälp av operatorn `or` filtreras denna fråga till bloggenheter med variationer av antingen "anarchism" eller "crumpet" i sina fulltextfält.
+Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields.

```graphql
{
  blogSearch(text: "anarchism | crumpets") {
    id
    title
    body
    author
  }
}
```

-Operatorn `follow by` anger ord som står på ett visst avstånd från varandra i fulltextdokumenten. Följande fråga kommer att returnera alla bloggar med variationer av "decentralisera" följt av "filosofi"
+The `follow by` operator specifies words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy".

```graphql
{
  blogSearch(text: "decentralized <-> philosophy") {
    id
    title
    body
    author
  }
}
```

@@ -385,11 +385,11 @@ Kombinera fulltextoperatorer för att skapa mer komplexa filter. Med en pretext-

### Validering

-Graph Node implementerar [specifikationsbaserad](https://spec.graphql.org/October2021/#sec-Validation) validering av de GraphQL-frågor den tar emot med hjälp av [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), som är baserad på [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Frågor som misslyckas med en valideringsregel får ett standardfel - besök [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) för mer information.
+Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more.

## Schema

-The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System).
+The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System).

GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). 
@@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### Entiteter -Alla GraphQL-typer med `@entity`-direktiv i ditt schema kommer att behandlas som entiteter och måste ha ett `ID`-fält. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **Note:** För närvarande måste alla typer i ditt schema ha ett `@entity`-direktiv. I framtiden kommer vi att behandla typer utan ett `@entity`-direktiv som värdeobjekt, men detta stöds ännu inte. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### Metadata för undergrafer -Alla subgrafer har ett autogenererat `_Meta_`-objekt, som ger tillgång till subgrafens metadata. Detta kan efterfrågas på följande sätt: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ Alla subgrafer har ett autogenererat `_Meta_`-objekt, som ger tillgång till sub Om ett block anges är metadata från det blocket, om inte används det senast indexerade blocket. Om det anges måste blocket vara efter undergrafens startblock och mindre än eller lika med det senast indexerade blocket. -`deployment` är ett unikt ID som motsvarar IPFS CID för filen `subgraph.yaml`. +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. -`block` ger information om det senaste blocket (med hänsyn till eventuella blockbegränsningar som skickas till `_meta`): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash: blockets hash - nummer: blockets nummer - timestamp: blockets timestamp, om tillgänglig (detta är för närvarande endast tillgängligt för undergrafer som indexerar EVM-nätverk) -`hasIndexingErrors` är en boolean som identifierar om undergrafen stötte på indexeringsfel vid något tidigare block +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From 61f87fac1162aa2ba34b9c6a90abcedc374866da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:57 -0500 Subject: [PATCH 0251/1534] New translations graphql-api.mdx (Turkish) --- .../tr/subgraphs/querying/graphql-api.mdx | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/tr/subgraphs/querying/graphql-api.mdx b/website/src/pages/tr/subgraphs/querying/graphql-api.mdx index 504817e886c0..e094085206ab 100644 --- a/website/src/pages/tr/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/tr/subgraphs/querying/graphql-api.mdx @@ -29,9 +29,9 @@ Query for a single `Token` entity defined in your schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -Tüm `Token` varlıklarını sorgulayın: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### İç içe varlık sıralaması için örnek -Graph Düğümü [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0)'dan itibaren varlıklar iç içe geçmiş varlıklar bazında sıralanabilir. 
+As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> Şu anda, `@entity` ve `@derivedFrom` alanlarında tek seviye derinliğindeki `String` veya `ID` tiplerine göre sıralama yapabilirsiniz. Ne yazık ki, [tek seviye derinliğindeki varlıklarda arayüzlere göre sıralama](https://github.com/graphprotocol/graph-node/pull/4058), diziler ve iç içe geçmiş varlıklar olan alanlara göre sıralama henüz desteklenmemektedir. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### Sayfalandırma @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### `first`'ün kullanımına örnek +#### Example using `first` İlk 10 tokeni sorgulayın: @@ -101,11 +101,11 @@ When querying a collection, it's best to: } ``` -Bir koleksiyonun ortasındaki varlık gruplarını sorgulamak için `skip` parametresi, koleksiyonun başından başlayarak belirli sayıda varlığı atlamak üzere `first` parametresi ile birlikte kullanılabilir. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### `first` ve `skip`'in kullanımına örnek +#### Example using `first` and `skip` -10 `Token` varlığını sorgulayın, bunları koleksiyonun başlangıcından itibaren 10 sıra kaydırın: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ Bir koleksiyonun ortasındaki varlık gruplarını sorgulamak için `skip` param } ``` -#### `first` ve `id_ge`'nin kullanımına örnek +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. 
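A minimal sketch of the multi-value filtering mentioned in the preceding bullet, assuming the same `challenges` collection used in the surrounding examples; the `_in` suffix is the standard way to match any of several values:

```graphql
{
  # matches challenges whose outcome is any of the listed values
  challenges(where: { outcome_in: ["succeeded", "failed"] }) {
    challenger
    outcome
  }
}
```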
-#### `where`'in kullanımına örnek +#### Example using `where` -`failed` ile sonuçlanan sorgu zorlukları: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r } ``` -Değer karşılaştırması için `_gt`, `_lte` gibi son ekler kullanabilirsiniz: +You can use suffixes like `_gt`, `_lte` for value comparison: #### Aralık filtreleme için örnek @@ -184,7 +184,7 @@ You can also filter entities that were updated in or after a specified block wit #### İç içe varlık filtreleme örneği -İç içe geçmiş varlıklar temelinde filtreleme, `_` son ekine sahip alanlarda mümkündür. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. Bu, yalnızca alt düzey varlıkları sağlanan koşulları karşılayan varlıkları getirmek istiyorsanız yararlı olabilir. @@ -202,7 +202,7 @@ Bu, yalnızca alt düzey varlıkları sağlanan koşulları karşılayan varlık #### Mantıksal operatörler -Graph Düğümü'nün [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) sürümüne göre, birden fazla parametreyi aynı `where` argümanında gruplayabilirsiniz. Bu, sonuçları birden fazla kritere göre filtrelemek için `and` veya `or` operatörlerini kullanmanıza olanak tanır. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. ##### `AND` Operator @@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Syntactic sugar:** Yukarıdaki sorguyu, virgülle ayrılmış bir alt ifade geçirerek, `and` operatörünü kaldırarak basitleştirebilirsiniz. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **Not**: Sorguları oluştururken, `or` operatörünü kullanmanın performans üzerindeki etkisini göz önünde bulundurmak önemlidir. `or` arama sonuçlarını genişletmek için yararlı bir araç olsa da, önemli maliyetleri de olabilir. `or` ile ilgili temel sorunlardan biri, sorguların yavaşlamasına neden olabilmesidir. Bunun nedeni, `or` operatörünün veritabanının birden fazla dizini taramasını gerektirmesidir ve bu da zaman alıcı bir işlem olabilir. Bu sorunlardan kaçınmak için, geliştiricilerin mümkün olduğunda or yerine and operatörlerini kullanmaları önerilir. Bu, daha hassas filtreleme sağlar ve daha hızlı, daha doğru sorgulara yol açabilir. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### Tüm Filtreler @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> Lütfen bazı eklentilerin yalnızca belirli tipler için desteklendiğini unutmayın. 
Örneğin, `Boolean` yalnızca `_not`, `_in` ve `not_in` desteği sağlar, ancak `_` yalnızca nesne ve arayüz tipleri için kullanılabilir. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -Ayrıca, aşağıdaki global filtreler `where` argümanının bir parçası olarak kullanılabilir: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### Zaman yolculuğu sorguları -Varlıklarınızın durumunu yalnızca varsayılan olan en son blok için değil, aynı zamanda geçmişteki rastgele bir blok için de sorgulayabilirsiniz. Bir sorgunun gerçekleşmesi gereken blok, sorguların üst düzey alanlarına bir `block` bağımsız değişkeni eklenerek blok numarası veya blok karması ile belirtilebilir. +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. @@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -Bu sorgu, 8.000.000 numaralı bloğun işlenmesinden hemen sonra var oldukları şekliyle `Challenge` varlıklarını ve bunlarla ilişkili `Application` varlıklarını döndürür. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### Örnek @@ -325,26 +325,26 @@ Bu sorgu, 8.000.000 numaralı bloğun işlenmesinden hemen sonra var oldukları } ``` -Bu sorgu, verilen hash ile bloğun işlenmesinden hemen sonra var olan şekliyle `Challenge` varlıklarını ve bunlarla ilişkili `Application` varlıklarını döndürür. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### Tam Metin Arama Sorguları -Tam metin arama sorgu alanları, subgraph şemasına eklenebilen ve özelleştirilebilen etkileyici bir metin arama API'si sağlar. Subgraph'ınıza tam metin araması eklemek için [Tam Metin Arama Alanlarını Tanımlama](/developing/creating-a-subgraph/#defining-fulltext-search-fields) bölümüne göz atın. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -Tam metin arama sorgularının kullanması gereken bir zorunlu alanı vardır, bu alan `text` adını taşır ve arama terimlerini sağlamak için kullanılır. Bu `text` arama alanında kullanılmak üzere birkaç özel tam metin operatörü mevcuttur. +Fulltext search queries have one required field, `text`, for supplying search terms. 
Several special fulltext operators are available to be used in this `text` search field.

Tam metin arama operatörleri:

| Symbol | Operator | Tanım |
| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ |
| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms |
| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms |
| `<->` | `Follow by` | Specify the distance between two words. |
| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) |

#### Örnekler

-`or` operatörünü kullanan bu sorgu, tam metin alanlarında "anarchism" veya "crumpet" varyasyonları bulunan blog varlıklarını filtreleyecektir.
+Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields.

```graphql
{
  blogSearch(text: "anarchism | crumpets") {
    id
    title
    body
    author
  }
}
```

-`follow by` operatörü, tam metin belgelerinde belirli bir mesafe uzaklıktaki kelimeleri belirtir. Aşağıdaki sorgu "decentralize" ve ardından "philosophy" kelimelerinin geçtiği tüm blogları döndürür
+The `follow by` operator specifies words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy".

```graphql
{
  blogSearch(text: "decentralized <-> philosophy") {
    id
    title
    body
    author
  }
}
```

@@ -385,11 +385,11 @@ Daha karmaşık filtreler oluşturmak için tam metin operatörlerini birleştir

### Validasyon

-Graph Düğümü, [graphql-js referans uygulamasını](https://github.com/graphql/graphql-js/tree/main/src/validation) temel alan [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules)'yi kullanarak aldığı GraphQL sorgularının [spesifikasyon tabanlı](https://spec.graphql.org/October2021/#sec-Validation) doğrulamasını gerçekleştirir. Bir doğrulama kuralını geçemeyen sorgular standart bir hata ile sonuçlanır. Daha fazla bilgi için [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation)'i ziyaret edin.
+Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more.

## Schema

-The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System).
+The schema of your dataSources, i.e. 
the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### Varlıklar -Şemanızda `@entity` yönergeleri bulunan tüm GraphQL türleri varlık olarak değerlendirilir ve bir `ID` alanına sahip olmalıdır. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **Not:** Şu anda, şemanızdaki tüm tiplerin bir `@entity` yönergesine sahip olması gerekmektedir. İlerleyen zamanlarda, `@entity` yönergesi olmadan tanımlanan tipleri değer nesneleri olarak ele alacağız, ancak bu henüz desteklenmemektedir. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### Subgraph Üst Verisi -Tüm subgraphlar, subgraph üst verisine erişim sağlayan otomatik olarak oluşturulmuş bir `_Meta_` nesnesine sahiptir. Bu aşağıdaki gibi sorgulanabilir: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ Tüm subgraphlar, subgraph üst verisine erişim sağlayan otomatik olarak oluş Eğer bir blok belirtilirse, üst veri o blokla ilgilidir; belirtilmezse en son dizinlenen blok dikkate alınır. Eğer belirtilirse, blok subgraph başlangıç bloğundan sonra olmalıdır ve en son indekslenen bloğa eşit veya daha küçük olmalıdır. -`deployment` eşsiz bir kimliktir ve `subgraph.yaml` dosyasının IPFS CID'sine karşılık gelir. +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
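One practical pattern, sketched here as an assumption rather than taken from this page: request `_meta` alongside the data itself, so a client can check indexing health and freshness in a single round trip (the `tokens` collection is borrowed from the earlier examples):

```graphql
{
  tokens(first: 5) {
    id
  }
  _meta {
    # flag any past indexing errors and report the latest indexed block
    hasIndexingErrors
    block {
      number
    }
  }
}
```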
-`block` en son blok hakkında bilgi sağlar (`_meta`'ya aktarılan blok kısıtlamalarını dikkate alarak): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash: the hash of the block - number: the block number - timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) -`hasIndexingErrors` ifadesi, subgraph'ın önceki bazı bloklarda indeksleme hatalarıyla karşılaşıp karşılaşmadığını belirleyen bir boolean değeridir +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From 5dafd3f093e21228b0a99ba600cbb373517b78e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:47:59 -0500 Subject: [PATCH 0252/1534] New translations graphql-api.mdx (Ukrainian) --- .../pages/uk/subgraphs/querying/graphql-api.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/uk/subgraphs/querying/graphql-api.mdx b/website/src/pages/uk/subgraphs/querying/graphql-api.mdx index f9176794ae2b..d93f73706ec6 100644 --- a/website/src/pages/uk/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/uk/subgraphs/querying/graphql-api.mdx @@ -29,7 +29,7 @@ Query for a single `Token` entity defined in your schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. Query all `Token` entities: @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Symbol | Operator | Description | -| --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | Description | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). 
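As a minimal sketch of that IDL, here is a hypothetical entity definition; the `Token` type and its fields are illustrative only and not part of any particular subgraph:

```graphql
# Each @entity type becomes queryable and must declare an `id` field.
type Token @entity {
  id: ID!
  owner: Bytes!
  name: String!
}
```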
GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). From d638a6a3021d7a39016de48621e2da3ffc40c712 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:00 -0500 Subject: [PATCH 0253/1534] New translations graphql-api.mdx (Chinese Simplified) --- .../zh/subgraphs/querying/graphql-api.mdx | 82 +++++++++---------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/website/src/pages/zh/subgraphs/querying/graphql-api.mdx b/website/src/pages/zh/subgraphs/querying/graphql-api.mdx index 06550ca17c23..9b2b5cd1093b 100644 --- a/website/src/pages/zh/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/zh/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### 例子 -查询模式中定义的单个 `Token` 实体: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -查询所有 `Token` 实体: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### 嵌套实体筛选示例 -从Graph 节点 [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0)开始,可以根据嵌套实体对实体进行排序。 +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> 目前,您可以在`@entity`和`@derivedFrom`字段上按一级深度的`String`或`ID`类型进行排序。不幸的是,[目前还不支持按一级深层实体上的接口排序](https://github.com/graphprotocol/graph-node/pull/4058)、按数组和嵌套实体的字段排序。 +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### 分页 @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### 使用`first`的示例 +#### Example using `first` 查询前10 个代币: @@ -101,11 +101,11 @@ When querying a collection, it's best to: } ``` -要查询集合中间的实体组,`skip` 参数可以与 `first` 参数结合使用,以跳过集合开头的指定数量实体。 +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. 
-#### 使用`first`和`skip`的示例 +#### Example using `first` and `skip` -查询从集合开头偏移 10 个位置的 10 个`Token` 实体: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ When querying a collection, it's best to: } ``` -#### 使用`first`和`id_ge`的示例 +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### 使用`where`的示例 +#### Example using `where` -查询包含`failed` 属性的challenges 的 结果: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r } ``` -您可以使用 `_gt`、`_lte` 等后缀进行值比较: +You can use suffixes like `_gt`, `_lte` for value comparison: #### 范围过滤示例 @@ -184,7 +184,7 @@ You can also filter entities that were updated in or after a specified block wit #### 嵌套实体筛选示例 -可以在带 `_` 后缀的字段中基于嵌套实体进行筛选。 +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. 如果您希望只获取其子级实体满足条件的实体,那么这可能很有用。 @@ -202,9 +202,9 @@ You can also filter entities that were updated in or after a specified block wit #### 逻辑运算符 -从Graph 节点 [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0)版本开始,您可以在同一`where`参数中对多个参数进行分组,使用`and``or`运算符根据多个标准筛选结果。 +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### `AND`运算符 +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. @@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **语法糖:**您可以通过传递一个用逗号分隔的子表达式来删除`and`运算符,从而简化上述查询。 +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -234,7 +234,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### `OR`运算符 +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **注意**:在构造查询时,重要的是要考虑使用`or`运算符对性能的影响。虽然`or`可能是扩大搜索结果的有用工具,但它也可能有巨大的成本。`or`的主要问题之一是它可能会导致查询速度减慢。这是因为`or`需要数据库扫描多个索引,这可能是一个耗时的过程。为了避免这些问题,建议开发人员尽可能使用和运算符而不是或。这允许更精确地过滤,并可以导致更快、更准确的查询。 +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. 
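To make the note above concrete, here is a sketch that keeps a broad `or` but narrows it inside an enclosing `and`; nesting of the logical operators is assumed to work as in the flat examples above, and the field names come from the `challenges` examples:

```graphql
{
  # the `and` prunes rows early, so the `or` scans a smaller set
  challenges(
    where: { and: [{ number_gte: "100" }, { or: [{ outcome: "succeeded" }, { outcome: "failed" }] }] }
  ) {
    challenger
    outcome
  }
}
```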
#### 所有过滤器 @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> 请注意,某些后缀仅支持特定类型。 例如,`Boolean` 仅支持 `_not`、`_in` 和 `_not_in`,但是`_`只适用于对象和接口类型。。 +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -此外,以下全局过滤器作为 `where` 参数的一部分可用: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### 跨时间查询 -您可以查询实体的状态,不仅查询默认的最新区块,还可以查询过去的任意区块。通过在查询的顶级字段中包含`block`参数,可以通过区块号或区块哈希指定应该发生查询的区块。 +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. @@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -此查询将返回 `Challenge` 实体及其关联的 `Application` 实体,因为它们在处理第 8000000 区块后就存在了。 +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### 示例 @@ -325,26 +325,26 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -此查询将返回 `Challenge` 实体及其关联的 `Application` 实体,因为它们在处理具有给定哈希值的区块后就存在了。 +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### 全文搜索查询 -全文搜索查询字段提供了一个表达性的文本搜索 API,可以添加到子图模式中并进行自定义。 请参阅[定义全文搜索字段](/developing/creating-a-subgraph/#defining-fulltext-search-fields)以将全文搜索添加到您的子图中。 +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -全文搜索查询有一个必填字段 `text`,用于提供搜索词。 在这个 `text` 搜索字段中可以使用几个特殊的全文运算符。 +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. 全文搜索运算符: -| 符号 | 运算符 | 描述 | -| ------ | ----------- | ---------------------------------------------------------------------- | -| `&` | `And` | 用于将多个搜索词组合到包含所有提供词条的实体的过滤器中 | +| 符号 | 运算符 | 描述 | +| ------ | ----------- | ------------------------------------- | +| `&` | `And` | 用于将多个搜索词组合到包含所有提供词条的实体的过滤器中 | | | | `Or` | 由 or 运算符分隔的多个搜索词的查询,将返回与任何提供的词匹配的所有实体 | -| `<->` | `Follow by` | 指定两个单词之间的距离。 | -| `:*` | `Prefix` | 使用前缀搜索词查找前缀匹配的单词(需要 2 个字符) | +| `<->` | `Follow by` | 指定两个单词之间的距离。 | +| `:*` | `Prefix` | 使用前缀搜索词查找前缀匹配的单词(需要 2 个字符) | #### 例子 -使用 `or` 运算符,此查询将过滤到在其全文字段中具有“anarchism”或“crumpet”变体的 blog 实体。 +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. 
```graphql { @@ -357,7 +357,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -`follow by` 运算符指定全文文档中相隔特定距离的单词。 以下查询将返回所有“decentralize”后跟着“philosophy”变体的日志。 +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -385,11 +385,11 @@ The result of such a query will not change over time, i.e., querying at a certai ### 验证 -Graph Node使用[graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules)对其接收的GraphQL查询进行[基于规范](https://spec.graphql.org/October2021/#sec-Validation)的验证,该工具基于[GraphQL-js引用实现](https://github.com/graphql/graphql-js/tree/main/src/validation)。验证规则失败的查询会出现标准错误-请访问[GraphQL规范](https://spec.graphql.org/October2021/#sec-Validation)了解更多信息。 +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. ## 模式 -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### 实体 -模式定义中所有带有 `@entity` 指示的 GraphQL 类型都将被视为实体,并且必须具有 `ID` 字段。 +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **注意:** 目前,模式定义中的所有类型都必须具有 `@entity` 指示。 将来,我们会将没有 `@entity` 指示的类型视为值对象,但目前尚不支持。 +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### 子图元数据 -所有子图都有一个自动生成的`_Meta_`对象,它提供对子图元数据的访问。可按如下方式查询: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` 如果提供了区块,则元数据为该区块的元数据,如果未使用最新的索引区块。如果提供,则区块必须在子图的起始区块之后,并且小于或等于最近索引的区块。 -`deployment`是一个唯一的ID,对应于`subgraph.yaml`文件的IPFS CID。 +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. 
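A related client-side pattern, included as an illustrative assumption: poll `_meta` on its own and compare `block.number` against a target block height before trusting query results:

```graphql
{
  _meta {
    hasIndexingErrors
    block {
      # compare against the block height your application requires
      number
    }
  }
}
```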
-`block`提供有关最新区块的信息(考虑传递给`_meta`的任何区块约束): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash:区块的哈希 - number:区块编号 - timestamp:区块的时间戳(如果可用)(当前仅适用于索引EVM网络的子图) -`hasIndexingErrors`是一个布尔值,用于标识子图在过去的某个区块中是否遇到索引错误。 +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From a452b12546ab520106f46b366db0e6dbde894b99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:01 -0500 Subject: [PATCH 0254/1534] New translations graphql-api.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/querying/graphql-api.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/ur/subgraphs/querying/graphql-api.mdx b/website/src/pages/ur/subgraphs/querying/graphql-api.mdx index 0069bd95c7d4..aba6fe64509b 100644 --- a/website/src/pages/ur/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/ur/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### مثالیں -آپ کے اسکیما میں بیان کردہ واحد `Token` ہستی کے لیے کیوری: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -تمام `Token` اداروں سے کیوری کریں: +Query all `Token` entities: ```graphql { @@ -62,7 +62,7 @@ When querying a collection, you may: #### نیسٹڈ ہستی کی چھانٹی کی مثال -گراف نوڈ کے مطابق [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) ہستیوں کو ترتیب دیا جا سکتا ہے نیسٹڈ اداروں کی بنیاد پر. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. The following example shows tokens sorted by the name of their owner: @@ -77,7 +77,7 @@ The following example shows tokens sorted by the name of their owner: } ``` -> فی الحال, آپ `@entity` اور `@derivedFrom` فیلڈز پر ایک سطح کی گہری `String` یا `ID` اقسام کے مطابق ترتیب دے سکتے ہیں۔ بدقسمتی سے، [ایک سطح کی گہری ہستیوں پر انٹرفیس کے لحاظ سے چھانٹنا](https://github.com/graphprotocol/graph-node/pull/4058)، ان فیلڈز کے لحاظ سے چھانٹنا جو اری اور نیسٹڈ ہستی ہیں ابھی تک حمایت نہیں کی. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### صفحہ بندی @@ -88,7 +88,7 @@ When querying a collection, it's best to: - Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. - Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. 
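A sketch of the attribute-based paging recommended in the last bullet above; the `id_gt` suffix, the `$lastID` variable, and the 1000-item page size are illustrative assumptions, with the client passing `""` on the first request and the last seen `id` afterwards:

```graphql
# one page of results, keyed on the `id` attribute rather than `skip`
query manyTokens($lastID: String) {
  tokens(first: 1000, where: { id_gt: $lastID }) {
    id
    owner
  }
}
```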
-#### `first` استعمال کرنے کی مثال +#### Example using `first` پہلے 10 ٹوکنز سے کیوری کریں: @@ -101,11 +101,11 @@ When querying a collection, it's best to: } ``` -مجموعے کے بیچ میں اداروں کے گروپس کے لیے کیوری کرنے کے لیے، `skip` پیرامیٹر کو `first` پیرامیٹر کے ساتھ استعمال کیا جا سکتا ہے تاکہ مجموعہ کے آغاز سے شروع ہونے والی ایک مخصوص تعداد کو چھوڑ دیا جا سکے. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### `first` اور `skip` استعمال کرنے کی مثال +#### Example using `first` and `skip` -کیوری 10 `Token` ہستیوں کو، مجموعہ کے آغاز سے 10 مقامات سے آف سیٹ: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ When querying a collection, it's best to: } ``` -#### `first` اور `id_ge` استعمال کرنے کی مثال +#### Example using `first` and `id_ge` If a client needs to retrieve a large number of entities, it's more performant to base queries on an attribute and filter by that attribute. For example, a client could retrieve a large number of tokens using this query: @@ -136,9 +136,9 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r - You can use the `where` parameter in your queries to filter for different properties. - You can filter on multiple values within the `where` parameter. -#### `where` استعمال کرنے کی مثال +#### Example using `where` -`failed` نتیجہ کے ساتھ کیوری کے چیلنجز: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r } ``` -آپ قدر کے موازنہ کے لیے `_gt`، `_lte` جیسے لاحقے استعمال کرسکتے ہیں: +You can use suffixes like `_gt`, `_lte` for value comparison: #### رینج فلٹرنگ کی مثال @@ -184,7 +184,7 @@ You can also filter entities that were updated in or after a specified block wit #### نیسٹڈ ہستی فلٹرنگ کی مثال -`_` لاحقہ والے فیلڈز میں نیسٹڈ ہستیوں کی بنیاد پر فلٹرنگ ممکن ہے. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. یہ مفید ہو سکتا ہے اگر آپ صرف ان ہستیوں کو لانے کے خواہاں ہیں جن کی چائلڈ لیول کے ہستی فراہم کردہ شرائط کو پورا کرتے ہیں. @@ -202,9 +202,9 @@ You can also filter entities that were updated in or after a specified block wit #### منطقی آپریٹرز -گراف نوڈ کے مطابق [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) آپ متعدد گروپ بنا سکتے ہیں۔ ایک سے زیادہ معیارات کی بنیاد پر نتائج کو فلٹر کرنے کے لیے `and` یا `or` آپریٹرز کا استعمال کرتے ہوئے اسی `where` دلیل میں پیرامیٹرز. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. -##### `AND` آپریٹر +##### `AND` Operator The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. @@ -220,7 +220,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num } ``` -> **Syntactic شوگر:** آپ `and` آپریٹر کو ہٹا کر کوما سے الگ کردہ سب اظہار کو پاس کر کے مذکورہ کیوری کو آسان بنا سکتے ہیں. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. 
> > ```graphql > { @@ -234,7 +234,7 @@ The following example filters for challenges with `outcome` `succeeded` and `num > } > ``` -##### `OR` آپریٹر +##### `OR` Operator The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. @@ -250,7 +250,7 @@ The following example filters for challenges with `outcome` `succeeded` or `numb } ``` -> **نوٹ**: کیوریز تیار کرتے وقت، `or` آپریٹر کے استعمال کے کارکردگی کے اثرات پر غور کرنا ضروری ہے۔ جب کہ `or` تلاش کے نتائج کو وسیع کرنے کے لیے ایک کارآمد ٹول ہو سکتا ہے، لیکن اس کے اہم اخراجات بھی ہو سکتے ہیں۔ `or` کے ساتھ اہم مسائل میں سے ایک یہ ہے کہ یہ کیوریز کو سست کرنے کا سبب بن سکتا ہے۔ اس کی وجہ یہ ہے کہ `or` کو متعدد اشاریہ جات کے ذریعے اسکین کرنے کے لیے ڈیٹا بیس کی ضرورت ہوتی ہے، جو کہ ایک وقت طلب عمل ہوسکتا ہے۔ ان مسائل سے بچنے کے لیے، یہ تجویز کیا جاتا ہے کہ ڈویلپرز استعمال کریں اور آپریٹرز کے بجائے یا جب بھی ممکن ہو۔ یہ زیادہ درست فلٹرنگ کی اجازت دیتا ہے اور تیز تر، زیادہ درست کیوریز کا باعث بن سکتا ہے. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### تمام فلٹرز @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> براہ کرم نوٹ کریں کہ کچھ لاحقے صرف مخصوص اقسام کے لیے معاون ہیں۔ مثل کے طور پر، `Boolean` صرف `_not`، `_in`، اور `_not_in` کو سپورٹ کرتا ہے، لیکن `_` دستیاب ہے صرف آبجیکٹ اور انٹرفیس کی اقسام کے لیے. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -اس کے علاوہ، مندرجہ ذیل عالمی فلٹرز `where` دلیل کے حصے کے طور پر دستیاب ہیں: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -289,7 +289,7 @@ _change_block(number_gte: Int) ### ٹائم ٹریول کے کیوریز -آپ اپنے ہستیوں کی حالت کے بارے میں نہ صرف تازہ ترین بلاک کے لیے کیوری کر سکتے ہیں، جو پہلے سے طے شدہ ہے، بلکہ ماضی میں کسی آربٹریری بلاک کے لیے بھی۔ جس بلاک پر کیوری ہونا چاہیے اس کی وضاحت یا تو اس کے بلاک نمبر یا اس کے بلاک ہیش سے کیوریز کے ٹاپ لیول فیلڈز میں `block` دلیل شامل کر کے کی جا سکتی ہے. +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. 
@@ -309,7 +309,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -یہ کیوری `Challenge` ہستیوں کو لوٹائے گا، اور ان سے وابستہ `Application` ہستیوں، جیسا کہ وہ بلاک نمبر 8,000,000 پروسیسنگ کے بعد براہ راست موجود تھے. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### مثال @@ -325,26 +325,26 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -یہ کیوری `Challenge` ہستیوں ور ان سے وابستہ `Application` ہستیوں کو لوٹائے گا، جیسا کہ وہ دی گئی ہیش کے ساتھ بلاک پر کارروائی کرنے کے بعد براہ راست موجود تھے. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. ### فل ٹیکسٹ تلاش کے کیوریز -فل ٹیکسٹ تلاش کے کیوری والے فیلڈز ایک تاثراتی ٹیکسٹ سرچ API فراہم کرتے ہیں جسے سب گراف اسکیما میں شامل کیا جا سکتا ہے اور اپنی مرضی کے مطابق بنایا جا سکتا ہے۔ اپنے سب گراف میں مکمل متن کی تلاش کو شامل کرنے کے لیے [فل ٹیکسٹ سرچ فیلڈز کی وضاحت](/developing/creating-a-subgraph/#defining-fulltext-search-fields) سے رجوع کریں. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph. -فل ٹیکسٹ کی تلاش کے کیوریز میں ایک مطلوبہ فیلڈ ہے، `text`, تلاش کی اصطلاحات کی فراہمی کے لیے. اس `text` تلاش کے فیلڈ میں استعمال کرنے کے لیے کئی خصوصی فل ٹیکسٹ آپریٹرز دستیاب ہیں. +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. فل ٹیکسٹ سرچ آپریٹرز: -| علامت | آپریٹر | تفصیل | -| --- | --- | --- | -| `&` | `And` | ایک سے زیادہ تلاش کی اصطلاحات کو ایک فلٹر میں یکجا کرنے کے لیے ان ہستیوں کے لیے جس میں فراہم کردہ تمام اصطلاحات شامل ہوں | -| | | `Or` | Or آپریٹر کے ذریعہ الگ کردہ متعدد تلاش کی اصطلاحات کے ساتھ کیوریز فراہم کردہ شرائط میں سے کسی سے بھی مماثلت کے ساتھ تمام ہستیوں کو واپس کریں گے | -| `<>` | `Follow by` | دو الفاظ کے درمیان فاصلہ بتائیں. | -| `:*` | `Prefix` | ایسے الفاظ تلاش کرنے کے لیے پریفکس ​​تلاش کی اصطلاح استعمال کریں جن کا سابقہ ​​مماثل ہو (۲ حروف درکار ہیں.) | +| علامت | آپریٹر | تفصیل | +| ------ | ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | ایک سے زیادہ تلاش کی اصطلاحات کو ایک فلٹر میں یکجا کرنے کے لیے ان ہستیوں کے لیے جس میں فراہم کردہ تمام اصطلاحات شامل ہوں | +| | | `Or` | Or آپریٹر کے ذریعہ الگ کردہ متعدد تلاش کی اصطلاحات کے ساتھ کیوریز فراہم کردہ شرائط میں سے کسی سے بھی مماثلت کے ساتھ تمام ہستیوں کو واپس کریں گے | +| `<->` | `Follow by` | دو الفاظ کے درمیان فاصلہ بتائیں. | +| `:*` | `Prefix` | ایسے الفاظ تلاش کرنے کے لیے پریفکس ​​تلاش کی اصطلاح استعمال کریں جن کا سابقہ ​​مماثل ہو (۲ حروف درکار ہیں.) | #### مثالیں -`or` آپریٹر استعمال کرتے ہوۓ​، یہ کیوری بلاگ ہستیوں کو فلٹر کرے گا جس میں ان کے فل ٹیکسٹ فیلڈز میں "انارکزم" یا "کرمپیٹ" کی مختلف حالتیں ہوں گی. +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. 
```graphql { @@ -357,7 +357,7 @@ The result of such a query will not change over time, i.e., querying at a certai } ``` -`follow by` آپریٹر فل ٹیکسٹ دستاویزات میں ایک مخصوص فاصلے کے علاوہ الفاظ کی وضاحت کرتا ہے۔ مندرجہ ذیل کیوری تمام بلاگز کو "ڈیسینٹرلائز" کے بعد "فلسفہ" کے تغیرات کے ساتھ واپس کرے گا +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -385,11 +385,11 @@ The result of such a query will not change over time, i.e., querying at a certai ### توثیق -گراف نوڈ [تخصص پر مبنی](https://spec.graphql.org/October2021/#sec-Validation) توثیق کو لاگو کرتا ہے جو اسے [graphql-tools-rs](https:// کا استعمال کرتے ہوئے موصول ہوتا ہے۔ github.com/dotansimha/graphql-tools-rs#validation-rules)، جو [graphql-js حوالہ کا نفاذ](https://github.com/graphql/graphql-js پر مبنی ہے /tree/main/src/validation)۔ وہ کیوریز جو توثیق کے اصول میں ناکام ہو جاتے ہیں ایک معیاری خرابی کے ساتھ ایسا کرتے ہیں - مزید جاننے کے لیے [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) ملاحظہ کریں. +Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. ## سکیما -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -397,13 +397,13 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` ### ہستیوں -آپ کے اسکیما میں `@entity` ہدایات کے ساتھ تمام GraphQL اقسام کو ہستیوں کے طور پر سمجھا جائے گا اور ان کا ایک `ID` فیلڈ ہونا ضروری ہے. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **نوٹ:** فی الحال، آپ کے اسکیما میں تمام اقسام میں ایک `@entity` ہدایت ہونی چاہیے۔ مستقبل میں، ہم بغیر کسی `@entity` کی قسموں کو ویلیو آبجیکٹ کے طور پر دیکھیں گے، لیکن یہ ابھی تک تعاون یافتہ نہیں ہے. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. ### سب گراف میٹا ڈیٹا -تمام سب گراف میں ایک خودکار طور پر تیار کردہ `_Meta_` آبجیکٹ ہے، جو سب گراف میٹا ڈیٹا تک رسائی فراہم کرتا ہے۔ اس سے اس طرح کیوری کیا جا سکتا ہے: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. 
This can be queried as follows: ```graphQL { @@ -421,12 +421,12 @@ GraphQL schemas generally define root types for `queries`, `subscriptions` and ` اگر کوئی بلاک فراہم کیا جاتا ہے تو، میٹا ڈیٹا اس بلاک کا ہوتا ہے، اگر تازہ ترین انڈیکسڈ بلاک استعمال نہیں کیا جاتا ہے۔ اگر فراہم کیا گیا ہو، تو بلاک سب گراف کے اسٹارٹ بلاک کے بعد ہونا چاہیے، اور حال ہی میں انڈیکس کیے گئے بلاک سے کم یا اس کے برابر ہونا چاہیے. -`deployment` ایک منفرد ID ہے، جو `subgraph.yaml` فائل کے IPFS CID سے مطابقت رکھتی ہے. +`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. -`block` تازہ ترین بلاک کے بارے میں معلومات فراہم کرتا ہے (`_meta` کو بھیجی گئی کسی بھی بلاک کی رکاوٹوں کو مدنظر رکھتے ہوئے): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - ہیش: بلاک کی ہیش - نمبر: بلاک نمبر - ٹائم اسٹیمپ: بلاک کا ٹائم اسٹیمپ، اگر دستیاب ہو (یہ فی الحال صرف ای وی ایم نیٹ ورکس کو انڈیکس کرنے والے سب گرافس کے لیے دستیاب ہے) -`hasIndexingErrors` ایک بولین ہے جو اس بات کی نشاندہی کرتا ہے کہ آیا سب گراف کو کچھ ماضی کے بلاک پر انڈیکسنگ کی غلطیوں کا سامنا کرنا پڑا +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block From e5325dfe1cb10af037247f7a40458599eb020658 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:02 -0500 Subject: [PATCH 0255/1534] New translations graphql-api.mdx (Vietnamese) --- .../vi/subgraphs/querying/graphql-api.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/vi/subgraphs/querying/graphql-api.mdx b/website/src/pages/vi/subgraphs/querying/graphql-api.mdx index 3c519e73eff5..a7cf95b71002 100644 --- a/website/src/pages/vi/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/vi/subgraphs/querying/graphql-api.mdx @@ -18,7 +18,7 @@ In your subgraph schema you define types called `Entities`. For each `Entity` ty ### Examples -Truy vấn cho một thực thể `Token` được xác định trong lược đồ của bạn: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,7 +29,7 @@ Truy vấn cho một thực thể `Token` được xác định trong lược đ } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. Query all `Token` entities: @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Biểu tượng | Toán tử | Miêu tả | -| --- | --- | --- | -| `&` | `And` | Để kết hợp nhiều cụm từ tìm kiếm thành một bộ lọc cho các thực thể bao gồm tất cả các cụm từ được cung cấp | -| | | `Or` | Các truy vấn có nhiều cụm từ tìm kiếm được phân tách bằng toán tử hoặc sẽ trả về tất cả các thực thể có kết quả khớp với bất kỳ cụm từ nào được cung cấp | -| `<->` | `Follow by` | Chỉ định khoảng cách giữa hai từ. | -| `:*` | `Prefix` | Sử dụng cụm từ tìm kiếm tiền tố để tìm các từ có tiền tố khớp với nhau (yêu cầu 2 ký tự.) 
| +| Biểu tượng | Toán tử | Miêu tả | +| ---------- | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `&` | `And` | Để kết hợp nhiều cụm từ tìm kiếm thành một bộ lọc cho các thực thể bao gồm tất cả các cụm từ được cung cấp | +| | | `Or` | Các truy vấn có nhiều cụm từ tìm kiếm được phân tách bằng toán tử hoặc sẽ trả về tất cả các thực thể có kết quả khớp với bất kỳ cụm từ nào được cung cấp | +| `<->` | `Follow by` | Chỉ định khoảng cách giữa hai từ. | +| `:*` | `Prefix` | Sử dụng cụm từ tìm kiếm tiền tố để tìm các từ có tiền tố khớp với nhau (yêu cầu 2 ký tự.) | #### Examples @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Lược đồ -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). From 674e0ec52e8e64d8f3b7faa4fe19e85615858fa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:03 -0500 Subject: [PATCH 0256/1534] New translations graphql-api.mdx (Marathi) --- .../mr/subgraphs/querying/graphql-api.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/mr/subgraphs/querying/graphql-api.mdx b/website/src/pages/mr/subgraphs/querying/graphql-api.mdx index 50c24e85beeb..a4f348df73f0 100644 --- a/website/src/pages/mr/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/mr/subgraphs/querying/graphql-api.mdx @@ -29,7 +29,7 @@ Query for a single `Token` entity defined in your schema: } ``` -> Note: When querying for a single entity, the `id` field is required, and it must be writen as a string. +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. Query all `Token` entities: @@ -202,7 +202,7 @@ This can be useful if you are looking to fetch only entities whose child-level e #### Logical operators -ग्राफ नोड नुसार [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) तुम्ही एकाधिक गटबद्ध करू शकता एकापेक्षा जास्त निकषांवर आधारित परिणाम फिल्टर करण्यासाठी `आणि` किंवा `किंवा` ऑपरेटर वापरून समान `जिथे` युक्तिवादात पॅरामीटर्स. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. 
##### `AND` Operator @@ -335,12 +335,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Symbol | Operator | वर्णन | -| --- | --- | --- | -| `&` | `आणि` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `किंवा` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `द्वारे अनुसरण करा` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| Symbol | Operator | वर्णन | +| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `Follow by` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | #### Examples @@ -389,7 +389,7 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Schema -The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +The schema of your dataSources, i.e. the entity types, values, and relationships that are available to query, are defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your [subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). @@ -423,7 +423,7 @@ If a block is provided, the metadata is as of that block, if not the latest inde `deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. -`block` नवीनतम ब्लॉकबद्दल माहिती प्रदान करते (`_meta` ला पास केलेल्या कोणत्याही ब्लॉक मर्यादा लक्षात घेऊन): +`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): - hash: the hash of the block - number: the block number From e77da8ae062f9777e574dbc9545815796a60695d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:04 -0500 Subject: [PATCH 0257/1534] New translations graphql-api.mdx (Hindi) --- .../hi/subgraphs/querying/graphql-api.mdx | 110 +++++++++--------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/website/src/pages/hi/subgraphs/querying/graphql-api.mdx b/website/src/pages/hi/subgraphs/querying/graphql-api.mdx index 4467dddb2a61..d3377aa1477b 100644 --- a/website/src/pages/hi/subgraphs/querying/graphql-api.mdx +++ b/website/src/pages/hi/subgraphs/querying/graphql-api.mdx @@ -6,19 +6,19 @@ The Graph में उपयोग किए जाने वाले GraphQL ## GraphQL क्या है? 
-[GraphQL](https://graphql.org/learn/) APIs के लिए एक क्वेरी भाषा और आपके मौजूदा डेटा के साथ उन क्वेरीज़ को पूरा करने के लिए एक रनटाइम। The Graph सबग्राफ को क्वेरी करने के लिए GraphQL का उपयोग करता है। +[GraphQL](https://graphql.org/learn/) is a query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. -GraphQL की बड़ी भूमिका को समझने के लिए, [developing](/subgraphs/developing/introduction/) और [creating a subgraph](/developing/creating-a-subgraph/) की समीक्षा करें। +To understand the larger role that GraphQL plays, review [developing](/subgraphs/developing/introduction/) and [creating a subgraph](/developing/creating-a-subgraph/). ## GraphQL के साथ क्वेरीज़ -अपने subgraph schema में आप `Entities` नामक प्रकार को परिभाषित करते हैं। प्रत्येक `Entity` प्रकार के लिए, शीर्ष-स्तरीय `Query` प्रकार पर `entity` और `entities` फ़ील्ड उत्पन्न की जाएंगी। +In your subgraph schema you define types called `Entities`. For each `Entity` type, `entity` and `entities` fields will be generated on the top-level `Query` type. -> नोट: The Graph का उपयोग करते समय `query` को `graphql` क्वेरी के शीर्ष पर शामिल करने की आवश्यकता नहीं है। +> Note: `query` does not need to be included at the top of the `graphql` query when using The Graph. ### उदाहरण -आपके स्कीमा में परिभाषित एकल `टोकन` इकाई के लिए प्रश्न: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -29,9 +29,9 @@ GraphQL की बड़ी भूमिका को समझने के ल } ``` -> नोट: एकल entity के लिए क्वेरी करते समय, `id` फ़ील्ड आवश्यक है, और इसे एक स्ट्रिंग के रूप में लिखा जाना चाहिए। +> Note: When querying for a single entity, the `id` field is required, and it must be written as a string. -सभी `टोकन` संस्थाओं को क्वेरी करें: +Query all `Token` entities: ```graphql { @@ -46,8 +46,8 @@ GraphQL की बड़ी भूमिका को समझने के ल जब आप एक संग्रह के लिए क्वेरी कर रहे हों, तो आप: -- `orderBy` पैरामीटर का उपयोग एक विशेष विशेषता के आधार पर क्रमबद्ध करने के लिए करें। -- `orderDirection` का उपयोग क्रम के दिशा को निर्दिष्ट करने के लिए करें, `asc` के लिए आरोही या `desc` के लिए अवरोही। +- Use the `orderBy` parameter to sort by a specific attribute. +- Use the `orderDirection` to specify the sort direction, `asc` for ascending or `desc` for descending. #### उदाहरण @@ -62,7 +62,7 @@ GraphQL की बड़ी भूमिका को समझने के ल #### नेस्टेड इकाई छँटाई के लिए उदाहरण -ग्राफ़ नोड के अनुसार [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) संस्थाओं को क्रमबद्ध किया जा सकता है नेस्टेड संस्थाओं के आधार पर। +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. 
निम्नलिखित उदाहरण में टोकन उनके मालिक के नाम के अनुसार क्रमबद्ध किए गए हैं: @@ -83,12 +83,12 @@ GraphQL की बड़ी भूमिका को समझने के ल जब एक संग्रह के लिए क्वेरी की जाती है, तो यह सबसे अच्छा होता है: -- `first` पैरामीटर का उपयोग करें ताकि संग्रह के शुरू से पेजिनेट किया जा सके। - - डिफ़ॉल्ट क्रम `ID` के अनुसार बढ़ते अल्फ़ान्यूमेरिक क्रम में होता है, **नहीं** कि निर्माण समय के अनुसार। -- `skip` पैरामीटर का उपयोग करके एंटिटीज़ को छोड़ें और पृष्ठांकन करें। उदाहरण के लिए, `first:100` पहले 100 एंटिटीज़ दिखाता है और `first:100, skip:100` अगली 100 एंटिटीज़ दिखाता है। -- `skip` मानों का उपयोग करने से बचें क्योंकि ये सामान्यतः खराब प्रदर्शन करते हैं। एक बड़े संख्या में आइटम को पुनः प्राप्त करने के लिए, एंटिटीज़ के आधार पर एक गुण के माध्यम से पृष्ठांकन करना सबसे अच्छा है, जैसा कि ऊपर के पिछले उदाहरण में दिखाया गया है। +- Use the `first` parameter to paginate from the beginning of the collection. + - The default sort order is by `ID` in ascending alphanumeric order, **not** by creation time. +- Use the `skip` parameter to skip entities and paginate. For instance, `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. +- Avoid using `skip` values in queries because they generally perform poorly. To retrieve a large number of items, it's best to page through entities based on an attribute as shown in the previous example above. -#### उदाहरण का उपयोग करना `पहले` +#### Example using `first` पहले 10 टोकन पूछें: @@ -101,11 +101,11 @@ GraphQL की बड़ी भूमिका को समझने के ल } ``` -एक संग्रह के बीच में संस्थाओं के समूहों के लिए क्वेरी करने के लिए, `छोड़ें` पैरामीटर का उपयोग `पहले` पैरामीटर के साथ किया जा सकता है ताकि शुरुआत से शुरू होने वाली संस्थाओं की एक निर्दिष्ट संख्या को छोड़ा जा सके संग्रह का। +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### उदाहरण का उपयोग करना `पहले` और `छोड़ें` +#### Example using `first` and `skip` -क्वेरी 10 `टोकन` इकाइयां, संग्रह की शुरुआत से 10 स्थानों से ऑफसेट: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -116,7 +116,7 @@ GraphQL की बड़ी भूमिका को समझने के ल } ``` -#### उदाहरण का उपयोग करना `पहले` और ` id_ge` +#### Example using `first` and `id_ge` यदि एक क्लाइंट को बड़ी संख्या में एंटिटीज़ पुनर्प्राप्त करने की आवश्यकता है, तो एट्रिब्यूट पर आधारित क्वेरी बनाना और उस एट्रिब्यूट द्वारा फ़िल्टर करना अधिक प्रभावशाली है। उदाहरण के लिए, एक क्लाइंट इस क्वेरी का उपयोग करके बड़ी संख्या में टोकन पुनर्प्राप्त कर सकता है: @@ -129,16 +129,16 @@ query manyTokens($lastID: String) { } ``` -पहली बार, यह `lastID = ""` के साथ क्वेरी भेजेगा, और अगले अनुरोधों के लिए यह `lastID` को पिछले अनुरोध में अंतिम एंटिटी के `id` एट्रिब्यूट पर सेट करेगा। यह दृष्टिकोण बढ़ते `skip` मानों का उपयोग करने की तुलना में काफी बेहतर प्रदर्शन करेगा। +The first time, it would send the query with `lastID = ""`, and for subsequent requests it would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. ### छनन -- आप अपनी क्वेरियों में विभिन्न गुणों के लिए फ़िल्टर करने के लिए `where` पैरामीटर का उपयोग कर सकते हैं। -- आप `where` पैरामीटर के भीतर कई मानों पर फ़िल्टर कर सकते हैं। +- You can use the `where` parameter in your queries to filter for different properties. +- You can filter on multiple values within the `where` parameter. 
-#### उदाहरण का उपयोग करना `कहाँ` +#### Example using `where` -`असफल` परिणाम वाली क्वेरी चुनौतियां: +Query challenges with `failed` outcome: ```graphql { @@ -152,7 +152,7 @@ query manyTokens($lastID: String) { } ``` -मूल्य तुलना के लिए आप `_gt`, `_lte` जैसे प्रत्ययों का उपयोग कर सकते हैं: +You can use suffixes like `_gt`, `_lte` for value comparison: #### श्रेणी फ़िल्टरिंग के लिए उदाहरण @@ -168,7 +168,7 @@ query manyTokens($lastID: String) { #### ब्लॉक फ़िल्टरिंग के लिए उदाहरण -आप `_change_block(number_gte: Int)` के साथ उन एंटिटीज़ को भी फ़िल्टर कर सकते हैं जो किसी निर्दिष्ट ब्लॉक में या उसके बाद अपडेट की गई थीं। +You can also filter entities that were updated in or after a specified block with `_change_block(number_gte: Int)`. This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). @@ -184,7 +184,7 @@ This can be useful if you are looking to fetch only entities which have changed, #### नेस्टेड इकाई फ़िल्टरिंग के लिए उदाहरण -`_` प्रत्यय वाले क्षेत्रों में नेस्टेड संस्थाओं के आधार पर फ़िल्टरिंग संभव है। +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. यह उपयोगी हो सकता है यदि आप केवल उन संस्थाओं को लाना चाहते हैं जिनके चाइल्ड-स्तरीय निकाय प्रदान की गई शर्तों को पूरा करते हैं। @@ -202,11 +202,11 @@ This can be useful if you are looking to fetch only entities which have changed, #### लॉजिकल ऑपरेटर्स -ग्राफ़ नोड [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) के अनुसार आप एकाधिक समूह बना सकते हैं एक से अधिक मानदंडों के आधार पर परिणामों को फ़िल्टर करने के लिए `और` या `या` ऑपरेटरों का उपयोग करते हुए एक ही `जहां` तर्क में पैरामीटर। +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. ##### `AND` Operator -निम्नलिखित उदाहरण में उन चुनौतियों को फ़िल्टर किया गया है जिनका `outcome` `succeeded` है और `number` `100` के बराबर या उससे अधिक है। +The following example filters for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. ```graphql { @@ -220,7 +220,7 @@ This can be useful if you are looking to fetch only entities which have changed, } ``` -> **सिंटैक्टिक शुगर:** आप `और` ऑपरेटर को कॉमा द्वारा अलग किए गए सब-एक्सप्रेशन को पास करके उपरोक्त क्वेरी को सरल बना सकते हैं। +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -236,7 +236,7 @@ This can be useful if you are looking to fetch only entities which have changed, ##### `OR` Operator -निम्नलिखित उदाहरण `outcome` `succeeded` या `number` जो `100` के बराबर या उससे अधिक है, के लिए चुनौतियों को फ़िल्टर करता है। +The following example filters for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. ```graphql { @@ -279,9 +279,9 @@ _not_ends_with _not_ends_with_nocase ``` -> कृपया ध्यान दें कि कुछ प्रत्यय केवल विशिष्ट प्रकारों के लिए समर्थित हैं। उदाहरण के लिए, `बूलियन` केवल `_not`, `_in`, और `_not_in` का समर्थन करता है, लेकिन `_` केवल ऑब्जेक्ट और इंटरफ़ेस प्रकारों के लिए उपलब्ध है। +> Please note that some suffixes are only supported for specific types. 
For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -इसके अलावा, निम्न वैश्विक फ़िल्टर `जहां` तर्क के भाग के रूप में उपलब्ध हैं: +In addition, the following global filters are available as part of `where` argument: ```graphql _change_block(number_gte: Int) @@ -291,9 +291,9 @@ _change_block(number_gte: Int) You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. -ऐसी क्वेरी का परिणाम समय के साथ नहीं बदलेगा, यानी, किसी निश्चित अतीत के ब्लॉक पर क्वेरी करने से वही परिणाम मिलेगा चाहे इसे कब भी निष्पादित किया जाए, इसके अलावा यदि आप किसी ब्लॉक पर क्वेरी करते हैं जो श्रृंखला के सिर के बहुत करीब है, तो परिणाम बदल सकता है यदि वह ब्लॉक मुख्य श्रृंखला पर **नहीं** है और श्रृंखला फिर से संगठित हो जाती है। एक बार जब एक ब्लॉक को अंतिम माना जा सकता है, तो क्वेरी का परिणाम नहीं बदलेगा। +The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to **not** be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. -> नोट: वर्तमान कार्यान्वयन अभी भी कुछ सीमाओं के अधीन है जो इन गारंटी का उल्लंघन कर सकती हैं। कार्यान्वयन हमेशा यह नहीं बता सकता कि एक निर्दिष्ट ब्लॉक हैश मुख्य श्रृंखला पर 'नहीं' है, या यदि किसी ब्लॉक हैश द्वारा क्वेरी परिणाम एक ब्लॉक के लिए है जो अभी अंतिम नहीं माना गया है, तो यह क्वेरी के साथ समांतर चल रहे ब्लॉक पुनर्गठन से प्रभावित हो सकता है। ये उन क्वेरी के परिणामों को प्रभावित नहीं करते हैं जो ब्लॉक हैश द्वारा अंतिम और मुख्य श्रृंखला पर ज्ञात होते हैं। [यह समस्या](https://github.com/graphprotocol/graph-node/issues/1405) विस्तार से बताती है कि ये सीमाएँ क्या हैं। +> Note: The current implementation is still subject to certain limitations that might violate these guarantees. The implementation can not always tell that a given block hash is not on the main chain at all, or if a query result by a block hash for a block that is not yet considered final could be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. #### उदाहरण @@ -309,7 +309,7 @@ You can query the state of your entities not just for the latest block, which is } ``` -यह क्वेरी `चैलेंज` संस्थाओं, और उनके संबद्ध `एप्लिकेशन` संस्थाओं को लौटा देगी, क्योंकि वे ब्लॉक संख्या 8,000,000 को संसाधित करने के बाद सीधे मौजूद थीं। +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. 
#### उदाहरण

@@ -325,26 +325,26 @@ The result of such a query will not change over time, i.e., querying at a certai
}
```

-यह क्वेरी `Challenge` संस्थाओं, और उनसे संबद्ध `अनुप्रयोग` संस्थाओं को लौटा देगी, क्योंकि वे दिए गए हैश के साथ ब्लॉक को संसाधित करने के बाद सीधे मौजूद थे।
+This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash.

### पूर्ण पाठ खोज प्रश्न

Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph/#defining-fulltext-search-fields) to add fulltext search to your subgraph.

-शब्दों की आपूर्ति के लिए पूर्ण पाठ्य अन्वेषण में एक आवश्यक क्षेत्र `पाठ` है। इस `पाठ` खोज क्षेत्र में उपयोग करने के लिए कई विशेष पूर्ण-पाठ पत्र उपलब्ध हैं।
+Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field.

पूर्ण पाठ खोज ऑपरेटर:

| प्रतीक | ऑपरेटर | Description |
| ------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ |
| `&` | `And` | सभी प्रदान किए गए शब्दों को शामिल करने वाली संस्थाओं के लिए एक से अधिक खोज शब्दों को फ़िल्टर में संयोजित करने के लिए |
| | | `Or` | या ऑपरेटर द्वारा अलग किए गए एकाधिक खोज शब्दों वाली क्वेरी सभी संस्थाओं को प्रदान की गई शर्तों में से किसी से मेल के साथ वापस कर देगी |
| `<->` | `Follow by` | दो शब्दों के बीच की दूरी निर्दिष्ट करें। |
| `:*` | `Prefix` | उन शब्दों को खोजने के लिए उपसर्ग खोज शब्द का उपयोग करें जिनके उपसर्ग मेल खाते हैं (2 वर्ण आवश्यक हैं।) |

#### उदाहरण

-`or` ऑपरेटर का उपयोग करते हुए, यह क्वेरी ब्लॉग इकाइयों को उनके पूर्ण टेक्स्ट फ़ील्ड में "अराजकतावाद" या "crumpet" की विविधताओं के साथ फ़िल्टर करेगी।
+Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields.

```graphql
{
  blogSearch(text: "anarchism | crumpets") {
    id
    title
    body
  }
}
```

-`अनुसरण करें` ऑपरेटर पूर्ण टेक्स्ट दस्तावेज़ों में शब्दों को एक विशिष्ट दूरी के अलावा निर्दिष्ट करता है। निम्न क्वेरी सभी ब्लॉगों को "विकेंद्रीकरण" के बाद "दर्शन" के रूपांतरों के साथ वापस कर देगी
+The `follow by` operator specifies words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy".

```graphql
{
@@ -385,25 +385,25 @@ Combine fulltext operators to make more complex filters.
With a pretext search o ### मान्यकरण -ग्राफ नोड लागू करता है [ विनिर्देशन आधारित ](https://spec.graphql.org/October2021/#sec-Validation) इसका उपयोग करके प्राप्त होने वाले ग्राफ़क्यूएल प्रश्नों का सत्यापन [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), जो [पर आधारित है ग्राफकल-जेएस संदर्भ कार्यान्वयन ](https://github.com/graphql/graphql-js/tree/main/src/validation)वे क्वेरीज़ जो सत्यापन नियम को विफल करती हैं, मानक त्रुटि के साथ ऐसा करती हैं - पर जाएँ [ अधिक जानने के लिए ग्राफ़िकल विनिर्देश](https://spec.graphql.org/October2021/#sec-Validation)। +Graph Node अपने द्वारा प्राप्त GraphQL क्वेरी की स्पेसिफिकेशन-आधारित(https://spec.graphql.org/October2021/#sec-Validation) वैलिडेशन करता है, जो graphql-tools-rs(https://github.com/dotansimha/graphql-tools-rs#validation-rules) पर आधारित है, जो graphql-js संदर्भ कार्यान्वयन(https://github.com/graphql/graphql-js/tree/main/src/validation) पर आधारित है। क्वेरी जो वैलिडेशन नियम में विफल होती हैं, वे एक मानक त्रुटि के साथ विफल होती हैं - अधिक जानने के लिए GraphQL स्पेसिफिकेशन(https://spec.graphql.org/October2021/#sec-Validation) पर जाएं। ## योजना -आपके डेटा स्रोत की स्कीमा - यानी, इकाई प्रकार, मान और संबंध जो क्वेरी के लिए उपलब्ध हैं - को [GraphQL इंटरफ़ेस डेफिनिशन लैंग्वेज (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System) के माध्यम से परिभाषित किया गया है। +आपके डेटा स्रोतों का स्कीमा, अर्थात् उपलब्ध प्रश्न करने के लिए संस्थाओं की प्रकार, मान और उनके बीच के संबंध, GraphQL Interface Definition Language (IDL)(https://facebook.github.io/graphql/draft/#sec-Type-System) के माध्यम से परिभाषित किए गए हैं। -ग्राफक्यूएल स्कीमा आम तौर पर `क्वेरी`, `सदस्यता` और `म्यूटेशन` के रूट प्रकारों को परिभाषित करते हैं। ग्राफ़ केवल `क्वेरी` का समर्थन करता है। आपके सबग्राफ के लिए रूट `क्वेरी` प्रकार स्वचालित रूप से आपके [Subgraph manifest](/developing/creating-a-subgraph/#components-of-a-subgraph). में शामिल ग्राफ़क्यूएल स्कीमा से उत्पन्न होता है। +GraphQL स्कीमा आम तौर पर queries, subscriptions और mutations के लिए रूट प्रकार परिभाषित करते हैं। The Graph केवल queries का समर्थन करता है। आपके सबग्राफ के लिए रूट Query प्रकार स्वचालित रूप से उस GraphQL स्कीमा से उत्पन्न होता है जो आपके सबग्राफ manifest(/developing/creating-a-subgraph/#components-of-a-subgraph) में शामिल होता है। > ध्यान दें: हमारा एपीआई म्यूटेशन को उजागर नहीं करता है क्योंकि डेवलपर्स से उम्मीद की जाती है कि वे अपने एप्लिकेशन से अंतर्निहित ब्लॉकचेन के खिलाफ सीधे लेन-देन(transaction) जारी करेंगे। ### Entities -All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. 
+आपके स्कीमा में जिन भी GraphQL प्रकारों में @entity निर्देश होते हैं, उन्हें संस्थाएँ (entities) माना जाएगा और उनमें एक ID फ़ील्ड होना चाहिए। -> **ध्यान दें:** वर्तमान में, आपके स्कीमा के सभी प्रकारों में एक `@entity` निर्देश होना चाहिए। भविष्य में, हम किसी `@entity` निर्देश के बिना प्रकारों को मान ऑब्जेक्ट के रूप में मानेंगे, लेकिन यह अभी तक समर्थित नहीं है। +> नोट: वर्तमान में, आपकी स्कीमा में सभी प्रकारों में @entity निर्देश होना चाहिए। भविष्य में, हम उन प्रकारों को मूल्य वस्तुएं मानेंगे जिनमें @entity निर्देश नहीं होगा, लेकिन यह अभी तक समर्थित नहीं है। ### सबग्राफ मेटाडेटा -सभी सबग्राफ में एक स्वतः उत्पन्न `_Meta_` ऑब्जेक्ट होता है, जो सबग्राफ मेटाडेटा तक पहुंच प्रदान करता है। इस प्रकार पूछताछ की जा सकती है: +सभी सबग्राफमें एक स्वचालित रूप से जनरेट किया गया _Meta_ ऑब्जेक्ट होता है, जो Subgraph मेटाडेटा तक पहुँच प्रदान करता है। इसे इस प्रकार क्वेरी किया जा सकता है: ```graphQL { @@ -421,12 +421,12 @@ All GraphQL types with `@entity` directives in your schema will be treated as en If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. -`deployment` एक अद्वितीय आईडी है, जो `subgraph.yaml` फ़ाइल के IPFS CID के अनुरूप है। +deployment एक विशिष्ट ID है, जो subgraph.yaml फ़ाइल के IPFS CID के अनुरूप है। -`block` नवीनतम ब्लॉक के बारे में जानकारी प्रदान करता है (`_meta` को पारित किसी भी ब्लॉक बाधाओं को ध्यान में रखते हुए): +block नवीनतम ब्लॉक के बारे में जानकारी प्रदान करता है (किसी भी ब्लॉक सीमाओं को ध्यान में रखते हुए जो कि _meta में पास की जाती हैं): - हैश: ब्लॉक का हैश - नंबर: ब्लॉक नंबर - टाइमस्टैम्प: ब्लॉक का टाइमस्टैम्प, यदि उपलब्ध हो (यह वर्तमान में केवल ईवीएम नेटवर्क को इंडेक्स करने वाले सबग्राफ के लिए उपलब्ध है) -`hasIndexingErrors` एक बूलियन है जो यह पहचानता है कि सबग्राफ को कुछ पिछले ब्लॉक में इंडेक्सिंग त्रुटियों का सामना करना पड़ा या नहीं +hasIndexingErrors एक बूलियन है जो यह पहचानता है कि क्या सबग्राफ ने किसी पिछले ब्लॉक पर इंडेक्सिंग त्रुटियों का सामना किया था। From ca229fa80cb9ad5e39ef4bd064dd75fea5f23e89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:05 -0500 Subject: [PATCH 0258/1534] New translations python.mdx (Romanian) --- website/src/pages/ro/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/querying/python.mdx b/website/src/pages/ro/subgraphs/querying/python.mdx index 4fb618510070..0937e4f7862d 100644 --- a/website/src/pages/ro/subgraphs/querying/python.mdx +++ b/website/src/pages/ro/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. 
+ - A quick article on how to seamlessly save your data as CSVs for further analysis. From da592d38c1e30b2ce98cb3ae367da4cba5ad4db6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:06 -0500 Subject: [PATCH 0259/1534] New translations python.mdx (French) --- website/src/pages/fr/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/fr/subgraphs/querying/python.mdx b/website/src/pages/fr/subgraphs/querying/python.mdx index 266a5f2511b7..f8d2b0741c18 100644 --- a/website/src/pages/fr/subgraphs/querying/python.mdx +++ b/website/src/pages/fr/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Interroger The Graph avec Python et Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds est une librairie Python utilisée pour les requêtes Subgraph. Cette librairie a été conçue par [Playgrounds](https://playgrounds.network/). Subgrounds permet de connecter directement les données d'un Subgraph à un environnement de données Python, permettant l'utilisation de librairies comme [pandas](https://pandas.pydata.org/) afin de faire de l'analyse de données! @@ -53,4 +54,4 @@ Subgrounds est développé et maintenu par l'équipe de [Playgrounds](https://pl - [Requêtes concurrentes](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Améliorez vos requêtes en les parallélisant. - [Export de données en CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - Un bref article sur comment sauvegarder vos données sous forme de fichiers CSV en vue d'une analyse ultérieure. + - A quick article on how to seamlessly save your data as CSVs for further analysis. From ed51135e00783dc06541ef156e27b9d8c3a8faec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:07 -0500 Subject: [PATCH 0260/1534] New translations python.mdx (Spanish) --- website/src/pages/es/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/querying/python.mdx b/website/src/pages/es/subgraphs/querying/python.mdx index 33829e8be26f..d51fd5deb007 100644 --- a/website/src/pages/es/subgraphs/querying/python.mdx +++ b/website/src/pages/es/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
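The `first`/`id_gt` pagination pattern documented in the graphql-api.mdx hunks earlier in this series is normally driven by a small client loop that advances `lastID` between requests. A minimal sketch in Python, assuming a placeholder endpoint URL and the same hypothetical `tokens` entity (with `id` and `owner` fields) used in those docs:

```python
import requests

# Placeholder endpoint; substitute any real subgraph query URL.
URL = "https://api.thegraph.com/subgraphs/name/example/tokens"

QUERY = """
query manyTokens($lastID: String) {
  tokens(first: 1000, where: { id_gt: $lastID }) {
    id
    owner
  }
}
"""

def fetch_all_tokens():
    all_tokens = []
    last_id = ""
    while True:
        resp = requests.post(URL, json={"query": QUERY, "variables": {"lastID": last_id}})
        resp.raise_for_status()
        page = resp.json()["data"]["tokens"]
        all_tokens.extend(page)
        if len(page) < 1000:
            # A short page means the collection is exhausted.
            return all_tokens
        # Resume after the last entity seen, per the id_gt filter.
        last_id = page[-1]["id"]
```

Because each request filters on the indexed `id` attribute, this loop stays fast on large collections, unlike paging with increasing `skip` values.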
From 40d5f0396ae180f3eef50fdd622bc69060ee64f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:08 -0500 Subject: [PATCH 0261/1534] New translations python.mdx (Arabic) --- website/src/pages/ar/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ar/subgraphs/querying/python.mdx b/website/src/pages/ar/subgraphs/querying/python.mdx index 4fb618510070..0937e4f7862d 100644 --- a/website/src/pages/ar/subgraphs/querying/python.mdx +++ b/website/src/pages/ar/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. From 9f2d0e1118abe36cb43d007f3514b9020c31db1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:09 -0500 Subject: [PATCH 0262/1534] New translations python.mdx (Czech) --- website/src/pages/cs/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/querying/python.mdx b/website/src/pages/cs/subgraphs/querying/python.mdx index 5d92e1d18aa1..669e95c19183 100644 --- a/website/src/pages/cs/subgraphs/querying/python.mdx +++ b/website/src/pages/cs/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds je intuitivní knihovna Pythonu pro dotazování na podgrafy, vytvořená [Playgrounds](https://playgrounds.network/). Umožňuje přímo připojit data subgrafů k datovému prostředí Pythonu, což vám umožní používat knihovny jako [pandas](https://pandas.pydata.org/) k provádění analýzy dat! @@ -53,4 +54,4 @@ Vzhledem k tomu, že subgrounds má rozsáhlou sadu funkcí, které je třeba pr - [Souběžné dotazy](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Zjistěte, jak zvýšit úroveň dotazů jejich paralelizací. - [Export dat do CSV](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - Stručný článek o tom, jak bez problémů ukládat data ve formátu CSV pro další analýzu. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
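The python.mdx pages updated in these commits describe Subgrounds' query-to-DataFrame workflow. A minimal sketch of that flow, assuming a placeholder subgraph URL and a hypothetical `Token` entity with an `owner` field:

```python
from subgrounds import Subgrounds

sg = Subgrounds()
# Placeholder URL; substitute a real subgraph query endpoint.
subgraph = sg.load_subgraph("https://api.thegraph.com/subgraphs/name/example/tokens")

# FieldPaths select entity fields; query_df flattens the response into
# a pandas DataFrame and handles pagination behind the scenes.
tokens = subgraph.Query.tokens(first=5)
df = sg.query_df([tokens.id, tokens.owner])
print(df)
```

The automatic pagination is part of the "tedious workflow" automation these pages refer to; larger pulls use the same call shape unchanged.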
From 5725017175218da37094a74c241c46d865a91b60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:10 -0500 Subject: [PATCH 0263/1534] New translations python.mdx (German) --- website/src/pages/de/subgraphs/querying/python.mdx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/subgraphs/querying/python.mdx b/website/src/pages/de/subgraphs/querying/python.mdx index 4fb618510070..a6640d513d6e 100644 --- a/website/src/pages/de/subgraphs/querying/python.mdx +++ b/website/src/pages/de/subgraphs/querying/python.mdx @@ -1,12 +1,13 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. -## Getting Started +## Erste Schritte Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. From d7349eab3926d1fff01fa7c801ef44db1edf5cc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:11 -0500 Subject: [PATCH 0264/1534] New translations python.mdx (Italian) --- website/src/pages/it/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/querying/python.mdx b/website/src/pages/it/subgraphs/querying/python.mdx index bd61ea0350fa..55cae50be8a9 100644 --- a/website/src/pages/it/subgraphs/querying/python.mdx +++ b/website/src/pages/it/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds è una libreria Python intuitiva per query dei subgraph, realizzata da [Playgrounds](https://playgrounds.network/). Permette di collegare direttamente i dati dei subgraph a un ambiente dati Python, consentendo di utilizzare librerie come [pandas](https://pandas.pydata.org/) per eseguire analisi dei dati! @@ -53,4 +54,4 @@ Poiché le funzionalità di subgrounds sono numerose e tutte da esplorare, ecco - [Query concorrenti](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Imparate a migliorare le vostre query parallelizzandole. - [Esportazione di dati in CSV](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - Un rapido articolo su come salvare senza problemi i dati in formato CSV per ulteriori analisi. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
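The `_Meta_` object covered in the graphql-api.mdx hunks earlier in this series is commonly used as a client-side freshness check before trusting query results. One possible shape, under the same placeholder-endpoint assumption:

```python
import requests

URL = "https://api.thegraph.com/subgraphs/name/example/tokens"  # placeholder

META_QUERY = """
{
  _meta {
    block { number }
    hasIndexingErrors
  }
}
"""

def subgraph_is_fresh(min_block: int) -> bool:
    # Reject data that is stale or that was indexed past an error.
    meta = requests.post(URL, json={"query": META_QUERY}).json()["data"]["_meta"]
    return not meta["hasIndexingErrors"] and meta["block"]["number"] >= min_block
```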
From 28c87e96b1a3c4ccfb91353df9382315abffb887 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:12 -0500 Subject: [PATCH 0265/1534] New translations python.mdx (Japanese) --- website/src/pages/ja/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/querying/python.mdx b/website/src/pages/ja/subgraphs/querying/python.mdx index 138558a6c06d..4a42ae3275b4 100644 --- a/website/src/pages/ja/subgraphs/querying/python.mdx +++ b/website/src/pages/ja/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgroundsは、[Playgrounds](https://playgrounds.network/)によって構築された、サブグラフをクエリするための直感的なPythonライブラリです。サブグラフデータを直接Pythonデータ環境に接続し、[pandas](https://pandas.pydata.org/)のようなライブラリを使用してデータ分析を行うことができます! @@ -53,4 +54,4 @@ Subgroundsには多くの機能があるので、まずはここから始めま - [並行クエリ](https://docs.playgrounds.network/subgrounds/getting_started/async/) - クエリを並列化することで、クエリをレベルアップする方法を紹介します。 - [データをCSVにエクスポートする](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - データをCSVとして保存し、さらに分析する方法についての簡単な記事です。 + - A quick article on how to seamlessly save your data as CSVs for further analysis. From bf53e21c0cd3858e9a808d407feee58177f9d5f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:13 -0500 Subject: [PATCH 0266/1534] New translations python.mdx (Korean) --- website/src/pages/ko/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/querying/python.mdx b/website/src/pages/ko/subgraphs/querying/python.mdx index 4fb618510070..0937e4f7862d 100644 --- a/website/src/pages/ko/subgraphs/querying/python.mdx +++ b/website/src/pages/ko/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
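Similarly, the `_change_block(number_gte: Int)` filter from those same docs supports a polling pattern that fetches only entities changed since the last sync. A sketch under the same placeholder assumptions, reusing the `applications` example entity from the docs:

```python
import requests

URL = "https://api.thegraph.com/subgraphs/name/example/tokens"  # placeholder

CHANGED_QUERY = """
query changedApplications($since: Int!) {
  applications(where: { _change_block: { number_gte: $since } }) {
    id
    whitelisted
    deposit
  }
}
"""

def poll_changes(last_synced_block: int):
    # Returns only entities updated in or after the given block.
    resp = requests.post(
        URL, json={"query": CHANGED_QUERY, "variables": {"since": last_synced_block}}
    )
    resp.raise_for_status()
    return resp.json()["data"]["applications"]
```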
From 0a5f50400562349496441ab54e77cf82b094924e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:14 -0500 Subject: [PATCH 0267/1534] New translations python.mdx (Dutch) --- website/src/pages/nl/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/querying/python.mdx b/website/src/pages/nl/subgraphs/querying/python.mdx index 4fb618510070..0937e4f7862d 100644 --- a/website/src/pages/nl/subgraphs/querying/python.mdx +++ b/website/src/pages/nl/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. From aa3412ea7ebbb54f0a18a464e63540a161b5193d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:15 -0500 Subject: [PATCH 0268/1534] New translations python.mdx (Polish) --- website/src/pages/pl/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/querying/python.mdx b/website/src/pages/pl/subgraphs/querying/python.mdx index 4fb618510070..0937e4f7862d 100644 --- a/website/src/pages/pl/subgraphs/querying/python.mdx +++ b/website/src/pages/pl/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
From 92e8812b5d8c84860e42c2b8d476b664a7c2af5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:16 -0500 Subject: [PATCH 0269/1534] New translations python.mdx (Portuguese) --- website/src/pages/pt/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/pt/subgraphs/querying/python.mdx b/website/src/pages/pt/subgraphs/querying/python.mdx index f8180ed19eb8..ced5c995611e 100644 --- a/website/src/pages/pt/subgraphs/querying/python.mdx +++ b/website/src/pages/pt/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Queries no The Graph com Python e Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds é uma biblioteca intuitiva em Python para a construção de subgraphs, construída pela [Playgrounds](https://playgrounds.network/). Ela permite-lhe conectar diretamente dados de subgraph a um ambiente de dados em Python e usar bibliotecas como [pandas](https://pandas.pydata.org/) para realizar análises de dados! @@ -53,4 +54,4 @@ Como o subgrounds tem um grande conjunto de recursos para explorar, aqui estão - [Queries Concorrentes](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Aprenda como subir o nível das suas queries ao paralelizá-las. - [Exportação de Dados em CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - Um artigo rápido sobre como salvar os seus dados como CSVs para análise futura. + - Um artigo rápido sobre como salvar os seus dados como arquivos CSV, para análise futura. From a8864fe8d6ae6c8f34b5d9bc2e4c4b34622b2b6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:17 -0500 Subject: [PATCH 0270/1534] New translations python.mdx (Russian) --- website/src/pages/ru/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ru/subgraphs/querying/python.mdx b/website/src/pages/ru/subgraphs/querying/python.mdx index 1b58a2fee929..b450ba9276de 100644 --- a/website/src/pages/ru/subgraphs/querying/python.mdx +++ b/website/src/pages/ru/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
From fbd247d77113503e4462307db6a8684c1be432c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:18 -0500 Subject: [PATCH 0271/1534] New translations python.mdx (Swedish) --- website/src/pages/sv/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/sv/subgraphs/querying/python.mdx b/website/src/pages/sv/subgraphs/querying/python.mdx index 0de1918fddf2..213b45f144b3 100644 --- a/website/src/pages/sv/subgraphs/querying/python.mdx +++ b/website/src/pages/sv/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. From ef1fbde0959afd49b49fce99605877a957ae4640 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:19 -0500 Subject: [PATCH 0272/1534] New translations python.mdx (Turkish) --- website/src/pages/tr/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/tr/subgraphs/querying/python.mdx b/website/src/pages/tr/subgraphs/querying/python.mdx index fc375df89dcd..dc82e0010623 100644 --- a/website/src/pages/tr/subgraphs/querying/python.mdx +++ b/website/src/pages/tr/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds, [Playgrounds](https://playgrounds.network/) tarafından oluşturulmuş, subgraph sorgulamak için kullanılan sezgisel bir Python kütüphanesidir. Bu kütüphane, subgraph verilerini doğrudan bir Python veri ortamına bağlamanıza olanak tanır ve [pandas](https://pandas.pydata.org/) gibi kütüphaneleri kullanarak veri analizi yapmanıza imkan sağlar! @@ -53,4 +54,4 @@ Subgrounds'un keşfedilecek geniş bir özellik seti bulunduğundan, işe bazı - [Eşzamanlı Sorgular](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Sorgularınızı paralelleştirerek nasıl geliştireceğinizi öğrenin. - [Veriyi CSV dosyalarına aktarma](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - Başka bir analiz için verilerinizi sorunsuz bir şekilde CSV olarak kaydetme hakkında hızlı bir makale. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
From bc30dc990ec7aae86b1a670d75eb7ea8f1246cd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:20 -0500 Subject: [PATCH 0273/1534] New translations python.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/querying/python.mdx b/website/src/pages/uk/subgraphs/querying/python.mdx index 4fb618510070..0937e4f7862d 100644 --- a/website/src/pages/uk/subgraphs/querying/python.mdx +++ b/website/src/pages/uk/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. From 1d6db471827300794f87e43f310054ff8d7c6048 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:21 -0500 Subject: [PATCH 0274/1534] New translations python.mdx (Chinese Simplified) --- website/src/pages/zh/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/querying/python.mdx b/website/src/pages/zh/subgraphs/querying/python.mdx index 621b1a12cd4a..a1372fbf300d 100644 --- a/website/src/pages/zh/subgraphs/querying/python.mdx +++ b/website/src/pages/zh/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
From 854863671817a9bf1156e1828c153c902583319b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:22 -0500 Subject: [PATCH 0275/1534] New translations python.mdx (Urdu (Pakistan)) --- website/src/pages/ur/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/querying/python.mdx b/website/src/pages/ur/subgraphs/querying/python.mdx index 6b8b21e6c059..b5abcce57b6d 100644 --- a/website/src/pages/ur/subgraphs/querying/python.mdx +++ b/website/src/pages/ur/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. From 52141b7f66d6ee9914730c714a9e92fbfa77d0fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:23 -0500 Subject: [PATCH 0276/1534] New translations python.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/querying/python.mdx b/website/src/pages/vi/subgraphs/querying/python.mdx index 4fb618510070..0937e4f7862d 100644 --- a/website/src/pages/vi/subgraphs/querying/python.mdx +++ b/website/src/pages/vi/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. 
From ec295923139cdcd59e1eb8e75a3d2cfb1a1282b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:24 -0500 Subject: [PATCH 0277/1534] New translations python.mdx (Marathi) --- website/src/pages/mr/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/querying/python.mdx b/website/src/pages/mr/subgraphs/querying/python.mdx index ea5f6a0d0639..020814827402 100644 --- a/website/src/pages/mr/subgraphs/querying/python.mdx +++ b/website/src/pages/mr/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. + - A quick article on how to seamlessly save your data as CSVs for further analysis. From 0e42137c8c7ca23cf913cadb793e416987265cd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:25 -0500 Subject: [PATCH 0278/1534] New translations python.mdx (Hindi) --- website/src/pages/hi/subgraphs/querying/python.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/hi/subgraphs/querying/python.mdx b/website/src/pages/hi/subgraphs/querying/python.mdx index 39d99f9efa31..22e9b71da321 100644 --- a/website/src/pages/hi/subgraphs/querying/python.mdx +++ b/website/src/pages/hi/subgraphs/querying/python.mdx @@ -1,5 +1,6 @@ --- title: Query The Graph with Python and Subgrounds +sidebarTitle: Python (Subgrounds) --- Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! @@ -53,4 +54,4 @@ Since subgrounds has a large feature set to explore, here are some helpful start - [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) - Learn how to level up your queries by parallelizing them. - [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) - - A quick article on how to seemlessly save your data as CSVs for further analysis. 
+ - कैसे अपने डेटा को CSVs के रूप में सहजता से सहेजें ताकि आगे के विश्लेषण के लिए इसका उपयोग किया जा सके। From ce4ce598acfc36cf76a2bcd423a5ffc05cefc4f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:26 -0500 Subject: [PATCH 0279/1534] New translations arbitrum-faq.mdx (French) --- .../src/pages/fr/archived/arbitrum/arbitrum-faq.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx index 4189fe9885c4..b2f6d7382c61 100644 --- a/website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/fr/archived/arbitrum/arbitrum-faq.mdx @@ -20,11 +20,11 @@ La communauté Graph a décidé d'avancer avec Arbitrum l'année dernière aprè ## Que dois-je faire pour utiliser The Graph en L2 ? -Le système de facturation de The Graph accepte le GRT sur Arbitrum, et les utilisateurs auront besoin d'ETH sur Arbitrum pour payer leurs frais de gaz. Bien que le protocole The Graph ait commencé sur Ethereum Mainnet, toute l'activité, y compris les contrats de facturation, est maintenant sur Arbitrum One. +Le système de facturation de The Graph accepte le GRT sur Arbitrum, et les utilisateurs devront disposer d'ETH sur Arbitrum pour payer le gaz. Bien que le protocole The Graph ait commencé sur le réseau principal d'Ethereum, toutes les activités, y compris les contrats de facturation, sont désormais réalisées sur Arbitrum One. Par conséquent, pour payer les requêtes, vous avez besoin de GRT sur Arbitrum. Voici quelques façons d'y parvenir : -- Si vous avez déjà des GRT sur Ethereum, vous pouvez les bridge vers Arbitrum. Vous pouvez le faire via l'option de bridge de GRT fournie dans Subgraph Studio ou en utilisant l'un des bridges suivants : +- Si vous avez déjà des GRT sur Ethereum, vous pouvez les transférer vers Arbitrum. Vous pouvez le faire via l'option de transfert de GRT fournie dans Subgraph Studio ou en utilisant l'un des ponts suivants : - [Le Bridge Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) @@ -41,7 +41,7 @@ Pour tirer parti de l'utilisation de The Graph sur L2, utilisez ce sélecteur d ## En tant que développeur de subgraphs, consommateur de données, indexeur, curateur ou délégateur, que dois-je faire maintenant ? -Les participants du réseau doivent passer à Arbitrum pour continuer à participer à The Graph Network. Veuillez consulter le [Guide de l'outil de transfert L2](/archived/arbitrum/l2-transfer-tools-guide/) pour une assistance supplémentaire. +Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. Toutes les récompenses d'indexation sont désormais entièrement sur Arbitrum. @@ -53,7 +53,7 @@ Tout a été testé minutieusement et un plan d’urgence est en place pour assu ## Les subgraphs existants sur Ethereum fonctionnent  t-ils? -Tous les subgraphs sont désormais sur Arbitrum. Veuillez consulter le [Guide de l'outil de transfert L2](/archived/arbitrum/l2-transfer-tools-guide/) pour vous assurer que vos subgraphs fonctionnent sans problème. +All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. ## GRT a-t-il un nouveau contrat intelligent déployé sur Arbitrum ? 
@@ -77,4 +77,4 @@ Le pont a été [fortement audité](https://code4rena.com/contests/2022-10-the-g L'ajout de GRT à votre solde de facturation Arbitrum peut être effectué en un seul clic dans [Subgraph Studio](https://thegraph.com/studio/). Vous pourrez facilement relier votre GRT à Arbitrum et remplir vos clés API en une seule transaction. -Visitez la [page de Facturation](/facturation/) pour obtenir des instructions plus détaillées sur l'ajout, le retrait ou l'acquisition de GRT. +Visit the [Billing page](/subgraphs/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. From 50d28150c390e5f43e5e9d4ca0149eea3e1c1091 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:28 -0500 Subject: [PATCH 0280/1534] New translations arbitrum-faq.mdx (Arabic) --- .../ar/archived/arbitrum/arbitrum-faq.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx index ea4e5f0b2872..898175b05cad 100644 --- a/website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/ar/archived/arbitrum/arbitrum-faq.mdx @@ -1,5 +1,5 @@ --- -title: الأسئلة الشائعة حول Arbitrum +title: Arbitrum FAQ --- Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. @@ -18,7 +18,7 @@ Scaling the protocol smart contracts onto L2 allows network participants to inte The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. -## ما الذي يجب علي فعله لاستخدام The Graph في L2؟ +## What do I need to do to use The Graph on L2? The Graph’s billing system accepts GRT on Arbitrum, and users will need ETH on Arbitrum to pay their gas. While The Graph protocol started on Ethereum Mainnet, all activity, including the billing contracts, is now on Arbitrum One. @@ -35,7 +35,7 @@ Consequently, to pay for queries, you need GRT on Arbitrum. Here are a few diffe Once you have GRT on Arbitrum, you can add it to your billing balance. -للاستفادة من استخدام The Graph على L2 ، استخدم قائمة المنسدلة للتبديل بين الشبكات. +To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) @@ -59,21 +59,21 @@ All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/arc Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. -## الأسئلة الشائعة حول إعداد الفواتير في Arbitrum +## Billing on Arbitrum FAQs -## ما الذي علي فعله بشأن ال GRT في حساب الفوترة الخاص بي ؟ +## What do I need to do about the GRT in my billing balance? -لا شئ! لقد تم نقل GRT الخاصة بك بشكل آمن إلى Arbitrum ويتم استخدامها للدفع مقابل الاستعلامات. +Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. -## كيف أعرف أن أموالي قد انتقلت بشكل آمن إلى Arbitrum؟ +## How do I know my funds have migrated securely to Arbitrum? All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). 
-## كيف أعرف أن جسر Arbitrum آمن؟ +## How do I know the Arbitrum bridge is secure? The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. -## ماذا علي أن أفعل إذا قمت بإضافة GRT جديد من محفظة Ethereum mainnet الخاصة بي؟ +## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. From 4fc27a7f7c1bc854870dc57df46873b2b8cbf308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:29 -0500 Subject: [PATCH 0281/1534] New translations arbitrum-faq.mdx (Czech) --- website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx index 56c377bdfe10..050d1a0641aa 100644 --- a/website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/cs/archived/arbitrum/arbitrum-faq.mdx @@ -77,4 +77,4 @@ Most byl [podroben důkladnému auditu](https://code4rena.com/contests/2022-10-t Přidání GRT do vašeho zúčtovacího zůstatku Arbitrum lze provést jedním kliknutím v aplikaci [Subgraph Studio](https://thegraph.com/studio/). Budete moci snadno propojit své GRT s Arbitrum a vyplnit své klíče API v rámci jedné transakce. -Podrobnější pokyny k přidávání, odebírání nebo získávání GRT najdete na stránce [Billing page](/subgraphs/billing/). +Visit the [Billing page](/subgraphs/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. From b1960e601da6192e76e23e1e8daa1beef84700d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:30 -0500 Subject: [PATCH 0282/1534] New translations arbitrum-faq.mdx (German) --- website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx index 010ccbb6feb1..54809f94fd9c 100644 --- a/website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/de/archived/arbitrum/arbitrum-faq.mdx @@ -41,7 +41,7 @@ Um die Vorteile von The Graph auf L2 zu nutzen, verwenden Sie diesen Dropdown-Sc ## Was muss ich als Entwickler von Subgraphen, Datenkonsument, Indexer, Kurator oder Delegator jetzt tun? -Die Netzwerkteilnehmer müssen zu Arbitrum wechseln, um weiterhin am The Graph Netzwerk teilzunehmen. Bitte lesen Sie den [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) für zusätzliche Unterstützung. +Netzwerk-Teilnehmer müssen zu Arbitrum wechseln, um weiterhin am The Graph Network teilnehmen zu können. Weitere Unterstützung finden Sie im [Leitfaden zum L2 Transfer Tool](/archived/arbitrum/l2-transfer-tools-guide/). Alle Indexierungsprämien sind jetzt vollständig auf Arbitrum. @@ -53,7 +53,7 @@ Alles wurde gründlich getestet, und es gibt einen Notfallplan, um einen sichere ## Funktionieren die vorhandenen Subgraphen auf Ethereum? -Alle Subgraphen sind jetzt auf Arbitrum. Bitte lesen Sie den [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/), um sicherzustellen, dass Ihre Subgraphen reibungslos funktionieren. 
+Alle Subgraphen sind jetzt auf Arbitrum. Bitte lesen Sie den [Leitfaden zum L2 Transfer Tool](/archived/arbitrum/l2-transfer-tools-guide/), um sicherzustellen, dass Ihre Subgraphen reibungslos funktionieren. ## Verfügt GRT über einen neuen Smart Contract, der auf Arbitrum eingesetzt wird? @@ -77,4 +77,4 @@ Die Brücke wurde [umfangreich geprüft] (https://code4rena.com/contests/2022-10 Das Hinzufügen von GRT zu Ihrem Arbitrum-Abrechnungssaldo kann mit nur einem Klick in [Subgraph Studio] (https://thegraph.com/studio/) erfolgen. Sie können Ihr GRT ganz einfach mit Arbitrum verbinden und Ihre API-Schlüssel in einer einzigen Transaktion füllen. -Besuchen Sie die [Abrechnungsseite] (/) für detaillierte Anweisungen zum Hinzufügen, Abheben oder Erwerben von GRT. +Visit the [Billing page](/subgraphs/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. From 39c7a5b197e207bd9a0d9c46f2df89838423c620 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:33 -0500 Subject: [PATCH 0283/1534] New translations arbitrum-faq.mdx (Portuguese) --- website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx index 3e8f82868222..0c1ba5b192ef 100644 --- a/website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/pt/archived/arbitrum/arbitrum-faq.mdx @@ -20,11 +20,11 @@ A comunidade do The Graph prosseguiu com o Arbitrum no ano passado, após o resu ## O que preciso fazer para usar o The Graph na L2? -O sistema de cobranças do The Graph aceita GRT no Arbitrum, e utilizadores precisarão de ETH no Arbitrum para pagar pelo seu gas. O protocolo do The Graph começou na Mainnet do Ethereum, mas todas as atividades, que incluem os contratos de cobranças, agora se encontram no Arbitrum One. +O sistema de cobranças do The Graph aceita GRT no Arbitrum, e os utilizadores precisarão de ETH no Arbitrum para pagar pelo seu gas. Enquanto o protocolo do The Graph começou na Mainnet do Ethereum, todas as atividades, inclusive os contratos de cobranças, agora ficam no Arbitrum One. Portanto, para pagar por queries, é necessário GRT no Arbitrum. Aqui estão algumas maneiras diferentes de consegui-lo: -- Caso já tenha GRT no Ethereum, é possível enviá-lo ao Arbitrum via bridge. Isto pode ser feito via a opção de bridging de GRT providenciada no Subgraph Studio ou uma das seguintes bridges: +- Caso já tenha GRT no Ethereum, é possível enviá-lo ao Arbitrum via bridge. Isto é possível via a opção de bridging de GRT providenciada no Subgraph Studio ou uma das seguintes bridges: - [Bridge no Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) @@ -77,4 +77,4 @@ O bridge foi [auditado rigorosamente](https://code4rena.com/contests/2022-10-the A adição de GRT ao seu saldo de cobrança no Arbitrum pode ser feito em apenas um clique no [Subgraph Studio](https://thegraph.com/studio/). Você poderá fazer o bridge do seu GRT ao Arbitrum com facilidade, e preencher as suas chaves de API em uma única transação. -Visite a página de [Cobranças](/) para mais detalhes sobre como depositar, retirar, ou adquirir GRT. +Visit the [Billing page](/subgraphs/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. 
From ce79076c106e2433ad486a0672603483e3ae0b6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:34 -0500 Subject: [PATCH 0284/1534] New translations arbitrum-faq.mdx (Russian) --- website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx index 7449536ed55e..0375e85a7135 100644 --- a/website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/ru/archived/arbitrum/arbitrum-faq.mdx @@ -41,7 +41,7 @@ title: Часто задаваемые вопросы об Arbitrum ## Что мне нужно делать сейчас как разработчику субграфа, потребителю данных, индексатору, куратору или делегатору? -Участники сети должны перейти на Arbitrum, чтобы продолжить участие в The Graph Network. Пожалуйста, обратитесь к [Руководству по инструменту переноса на L2](/archived/arbitrum/l2-transfer-tools-guide/) для получения дополнительной помощи. +Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. Все вознаграждения за индексацию теперь полностью находятся на Arbitrum. @@ -53,7 +53,7 @@ title: Часто задаваемые вопросы об Arbitrum ## Работают ли существующие субграфы на Ethereum? -Все субграфы теперь находятся на Arbitrum. Пожалуйста, обратитесь к [Руководству по инструменту переноса на L2](/arbitum/l2-transfer-tools-guide/), чтобы убедиться в бесперебойной работе своих субграфов. +All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. ## Есть ли у GRT новый смарт-контракт, развернутый на Arbitrum? @@ -77,4 +77,4 @@ title: Часто задаваемые вопросы об Arbitrum Добавление GRT к Вашему балансу для оплаты на Arbitrum можно выполнить в один клик в [Subgraph Studio](https://thegraph.com/studio/). Вы сможете легко перенести свои GRT на Arbitrum и ввести свои API-ключи в одной транзакции. -Посетите страницу [Billing page](/subgraphs/billing/) для получения более подробных инструкций по добавлению, выводу или приобретению GRT. +Visit the [Billing page](/subgraphs/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. From 01846c0790e924eff48687e3095ac9a120fdc37b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:36 -0500 Subject: [PATCH 0285/1534] New translations arbitrum-faq.mdx (Turkish) --- .../src/pages/tr/archived/arbitrum/arbitrum-faq.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx index 2d4571f7a6cf..ca32d52975dc 100644 --- a/website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/tr/archived/arbitrum/arbitrum-faq.mdx @@ -20,11 +20,11 @@ Graph topluluğu, geçen yıl [GIP-0031](https://forum.thegraph.com/t/gip-0031-a ## Graph'ı Katman2'de kullanmak için ne yapmam gerekiyor? -The Graph'in faturalandırma sistemi, Arbitrum ağı üzerinde GRT ile ödeme alır. Kullanıcıların gaz ücretlerini ödemek için Arbitrum üzerinde ETH'ye ihtiyaçları olacaktır. 
The Graph protokolü, başlangıçta Ethereum Mainnet üzerinde başlatılmış olsa da, faturalandırma sözleşmeleri de dahil olmak üzere tüm faaliyetler artık Arbitrum One üzerinde gerçekleştirilmektedir. +The Graph'in faturalandırma sistemi, Arbitrum ağı üzerinde GRT ile ödeme alır. Kullanıcıların gas ücretlerini ödemek için Arbitrum üzerinde ETH'ye ihtiyaçları olacaktır. The Graph protokolü, başlangıçta Ethereum Mainnet üzerinde başlatılmış olsa da, faturalandırma sözleşmeleri de dahil olmak üzere tüm faaliyetler artık Arbitrum One üzerinde gerçekleştirilmektedir. Dolayısıyla, sorgu ödemelerini yapmak için Arbitrum üzerinde GRT'ye ihtiyacınız var. İşte GRT elde etmenin birkaç farklı yolu: -- Ethereum üzerinde halihazırda GRT'leriniz varsa, bunları Arbitrum'a köprüleyebilirsiniz. Bu işlemi Subgraph Studio'da sunulan GRT köprüleme seçeneği aracılığıyla veya aşağıdaki köprülerden birini kullanarak yapabilirsiniz: +- Ethereum üzerinde zaten GRT'niz varsa, bunu Arbitrum'a köprüleyebilirsiniz. Bunu, Subgraph Studio'da sunulan GRT köprüleme seçeneği aracılığıyla veya aşağıdaki köprülerden birini kullanarak yapabilirsiniz: - [Arbitrum Köprüsü](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) @@ -41,7 +41,7 @@ Graph'ı Katman2'de kullanmanın avantajlarından yararlanmak için, zincirler a ## Bir subgraph geliştirici, veri tüketicisi, Endeksleyici, Küratör veya Delegatör olarak şimdi ne yapmalıyım? -Ağ katılımcıları, The Graph Ağı'nda katılım göstermeye devam edebilmek için Arbitrum'a geçmek zorundadır. Ek destek için [L2 Transfer Aracı Kılavuzu](/archived/arbitrum/l2-transfer-tools-guide/) sayfasına bakınız. +Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. Tüm endeksleme ödülleri artık tamamen Arbitrum üzerindedir. @@ -53,7 +53,7 @@ Güvenli ve sorunsuz bir geçiş sağlamak için her şey kapsamlı bir şekilde ## Ethereum üzerindeki mevcut subgraph'ler çalışıyor mu? -Tüm subgraph'ler artık Arbitrum üzerindedir. Subgraph'lerinizin sorunsuz bir şekilde çalıştığından emin olmak için [L2 Transfer Aracı Kılavuzu](/archived/arbitrum/l2-transfer-tools-guide/) sayfasını inceleyin. +All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. ## GRT'nin Arbitrum'da yeni bir akıllı sözleşmesi mi var? @@ -77,4 +77,4 @@ Köprü, tüm kullanıcılar için emniyet ve güvenliği sağlamak amacıyla [k GRT'yi Arbitrum faturalandırma bakiyenize eklemek [Subgraph Stüdyo'da](https://thegraph.com/studio/) tek bir tık ile yapılabilir. GRT'nizi Arbitrum'a kolayca köprüleyebilecek ve API anahtarlarınızı tek bir işlemle doldurabileceksiniz. -GRT ekleme, çekme veya satın alma hakkında daha ayrıntılı talimatlar için [Faturalandırma sayfasını](/subgraphs/billing/) ziyaret edin. +Visit the [Billing page](/subgraphs/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. 
From 06bd670de8e88c5e1062e5dbac9e2d879c0e2eb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:37 -0500 Subject: [PATCH 0286/1534] New translations arbitrum-faq.mdx (Chinese Simplified) --- website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx index 1696ca24d7fe..cc912a21a269 100644 --- a/website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/zh/archived/arbitrum/arbitrum-faq.mdx @@ -16,7 +16,7 @@ By scaling The Graph on L2, network participants can now benefit from: Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. -去年,The Graph社区在[GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) 讨论的结果之后,决定推进Arbitrum。 +去年,Graph社区在[GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) 讨论的结果之后,决定推进Arbitrum。 ## 我需要做什么才能在L2上使用Graph? @@ -49,7 +49,7 @@ All indexing rewards are now entirely on Arbitrum. All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -所有事项已经经过了彻底测试,并制定了应急计划,以确保安全和无缝过渡。详细信息可以在这里 [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20)找到。 +所有事项已经经过了彻底测试,并制定了应急计划,以确保安全和无缝过渡。详细信息可以在 [这里] (https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20)找到。 ## Are existing subgraphs on Ethereum working? @@ -57,7 +57,7 @@ All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/arc ## Does GRT have a new smart contract deployed on Arbitrum? -是的,GRT在Arbitrum上有一个额外的智能合约[smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7)。然而,以太坊主网上的GRT合约 [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7)将继续保持运营。 +是的,GRT在Arbitrum上有一个额外的[智能合约](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7)。然而,以太坊主网上的 [GRT合约](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7)将继续保持运营。 ## Arbitrum账单常见问题解答 @@ -71,7 +71,7 @@ All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/arc ## 怎么知道Arbitrum 跨链桥是安全的? -为确保所有用户的安全性和安全性,该跨链桥经过了严格的审计[heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest)。 +为确保所有用户的安全性和安全性,该跨链桥经过了[严格的审计](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest)。 ## 如果从以太坊主网钱包添加新的GRT,需要做什么? 
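Several of the `arbitrum-faq.mdx` patches above link the GRT token contract on Arbitrum One (`0x9623...88c7`) and note that queries are paid with GRT bridged to L2. As an illustration of what that means in practice, the sketch below reads a wallet's GRT balance on Arbitrum with web3.py. It assumes web3.py v6, the public Arbitrum RPC endpoint, and a standard ERC-20 ABI; the holder address is a placeholder to fill in.

```python
from web3 import Web3

# Public Arbitrum One RPC endpoint (any provider will do).
w3 = Web3(Web3.HTTPProvider("https://arb1.arbitrum.io/rpc"))

# GRT token contract on Arbitrum, as linked from the FAQ above.
GRT = Web3.to_checksum_address("0x9623063377ad1b27544c965ccd7342f7ea7e88c7")

# Minimal ERC-20 ABI: only the two view functions used below.
ERC20_ABI = [
    {"name": "balanceOf", "type": "function", "stateMutability": "view",
     "inputs": [{"name": "owner", "type": "address"}],
     "outputs": [{"name": "", "type": "uint256"}]},
    {"name": "decimals", "type": "function", "stateMutability": "view",
     "inputs": [], "outputs": [{"name": "", "type": "uint8"}]},
]

grt = w3.eth.contract(address=GRT, abi=ERC20_ABI)

holder = Web3.to_checksum_address("0x0000000000000000000000000000000000000000")  # placeholder wallet
raw = grt.functions.balanceOf(holder).call()
print(f"GRT on Arbitrum: {raw / 10 ** grt.functions.decimals().call()}")
```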
From 557c565012052ea72606f62f14acde486d6f7c9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:39 -0500 Subject: [PATCH 0287/1534] New translations arbitrum-faq.mdx (Marathi) --- website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx index 689ac37ab7e1..562824e64e95 100644 --- a/website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/mr/archived/arbitrum/arbitrum-faq.mdx @@ -39,7 +39,7 @@ To take advantage of using The Graph on L2, use this dropdown switcher to toggle ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## सबग्राफ डेव्हलपर, डेटा कंझ्युमर, इंडेक्सर, क्युरेटर किंवा डेलिगेटर म्हणून, मला आता काय करावे लागेल? +## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. @@ -73,7 +73,7 @@ All GRT billing balances have already been successfully migrated to Arbitrum. Yo The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. -## मी माझ्या इथरियम मेननेट वॉलेटमधून नवीन GRT जोडत असल्यास मला काय करावे लागेल? +## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. From ad4502e11d1716ac6085e1ff60337749cff20029 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:40 -0500 Subject: [PATCH 0288/1534] New translations arbitrum-faq.mdx (Hindi) --- .../hi/archived/arbitrum/arbitrum-faq.mdx | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/website/src/pages/hi/archived/arbitrum/arbitrum-faq.mdx b/website/src/pages/hi/archived/arbitrum/arbitrum-faq.mdx index 7cd07febfc64..8f2c53011c62 100644 --- a/website/src/pages/hi/archived/arbitrum/arbitrum-faq.mdx +++ b/website/src/pages/hi/archived/arbitrum/arbitrum-faq.mdx @@ -6,7 +6,7 @@ Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitru ## The Graph ने L2 समाधान को लागू करने का कारण क्या था? -L2 पर The Graph को स्केल करके, नेटवर्क के प्रतिभागी अब निम्नलिखित लाभ उठा सकते हैं: + L2 पर The Graph को स्केल करके, नेटवर्क के प्रतिभागी अब निम्नलिखित लाभ उठा सकते हैं: - Upwards of 26x savings on gas fees @@ -18,13 +18,13 @@ L2 पर प्रोटोकॉल स्मार्ट कॉन्ट् The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. -## L2 पर ग्राफ़ का उपयोग करने के लिए मुझे क्या करना होगा? +## What do I need to do to use The Graph on L2? 
-The Graph का बिलिंग सिस्टम Arbitrum पर GRT स्वीकार करता है, और उपयोगकर्ताओं को गैस के लिए Arbitrum पर ETHकी आवश्यकता होगी। जबकि The Graph प्रोटोकॉल Ethereum Mainnet पर शुरू हुआ था, अब सभी गतिविधियाँ, जिसमें बिलिंग कॉन्ट्रैक्ट्स शामिल हैं, Arbitrum One पर हैं। +The Graph का बिलिंग सिस्टम Arbitrum पर GRT को स्वीकार करता है, और उपयोगकर्ताओं को गैस के भुगतान के लिए Arbitrum पर ETH की आवश्यकता होगी। जबकि The Graph प्रोटोकॉल Ethereum Mainnet पर शुरू हुआ, सभी गतिविधियाँ, जिसमें बिलिंग कॉन्ट्रैक्ट्स भी शामिल हैं, अब Arbitrum One पर हैं। अत: क्वेरीज़ के लिए भुगतान करने के लिए, आपको Arbitrum पर GRT की आवश्यकता है। इसे प्राप्त करने के कुछ विभिन्न तरीके यहाँ दिए गए हैं: -- यदि आपके पास पहले से Ethereum पर GRT है, तो आप इसे Arbitrum पर ब्रिज कर सकते हैं। आप इसे Subgraph Studio में उपलब्ध GRT ब्रिजिंग विकल्प के माध्यम से या निम्नलिखित में से किसी एक ब्रिज का उपयोग करके कर सकते हैं: +- यदि आपके पास पहले से Ethereum पर GRT है, तो आप इसे Arbitrum पर ब्रिज कर सकते हैं। आप यह Subgraph Studio में प्रदान किए गए GRT ब्रिजिंग विकल्प के माध्यम से या निम्नलिखित में से किसी एक ब्रिज का उपयोग करके कर सकते हैं: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) @@ -35,25 +35,26 @@ The Graph का बिलिंग सिस्टम Arbitrum पर GRT स एक बार जब आपके पास Arbitrum पर GRT हो, तो आप इसे अपनी बिलिंग बैलेंस में जोड़ सकते हैं। -L2 पर द ग्राफ़ का उपयोग करने का लाभ उठाने के लिए, इस ड्रॉपडाउन स्विचर का उपयोग जंजीरों के बीच टॉगल करने के लिए करें। +To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) ## Subgraph developer, data consumer, Indexer, Curator, or Delegator, के रूप में, मुझे अब क्या करने की आवश्यकता है? -नेटवर्क प्रतिभागियों को The Graph Network में भाग लेना जारी रखने के लिए Arbitrum पर स्थानांतरित होना होगा। अतिरिक्त सहायता के लिए कृपया [L2 ट्रांसफर टूल गाइड](/archived/arbitrum/l2-transfer-tools-guide/) देखें। +The Graph Network में भाग लेने के लिए नेटवर्क प्रतिभागियों को Arbitrum पर स्थानांतरित होना आवश्यक है। अतिरिक्त सहायता के लिए कृपया [L2 Transfer Tool मार्गदर्शक](/archived/arbitrum/l2-transfer-tools-guide/) देखें। अब सभी इंडेक्सिंग पुरस्कार पूरी तरह से Arbitrum पर हैं। ## क्या नेटवर्क को L2 पर स्केल करने से संबंधित कोई जोखिम थे? -सभी स्मार्ट कॉन्ट्रैक्ट्स का पूरी तरह से परीक्षित किया गया है। (https://github.com/graphprotocol/contracts/blob/main/packages/contracts/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +सभी स्मार्ट कॉन्ट्रैक्ट्स का पूरी तरह से परीक्षित किया गया है। +(https://github.com/graphprotocol/contracts/blob/main/packages/contracts/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). हर चीज़ का पूरी तरह से परीक्षण किया गया है, और एक सुरक्षित और निर्बाध संक्रमण सुनिश्चित करने के लिए एक आकस्मिक योजना बनाई गई है। विवरण यहां पाया जा सकता है [here] (https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and- सुरक्षा-विचार-20). ## क्या Ethereum पर मौजूद सबग्राफ़ काम कर रहे हैं? -अब सभी सबग्राफ़ Arbitrum पर हैं। सुनिश्चित करें कि आपके सबग्राफ़ सुचारू रूप से कार्य करते हैं, कृपया [L2 ट्रांसफर टूल गाइड](/archived/arbitrum/l2-transfer-tools-guide/) देखें। +सभी सबग्राफ अब Arbitrum पर हैं। कृपया [ L2 Transfer Tool मार्गदर्शक](/archived/arbitrum/l2-transfer-tools-guide/) का संदर्भ लें ताकि आपके सबग्राफ बिना किसी समस्या के कार्य करें। ## क्या GRT का एक नया स्मार्ट कॉन्ट्रैक्ट Arbitrum पर तैनात किया गया है? 
@@ -61,20 +62,20 @@ Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/addr ## Arbitrum पर बिलिंग FAQs -## मुझे अपने बिलिंग बैलेंस में GRT के बारे में क्या करना होगा? +## What do I need to do about the GRT in my billing balance? -कुछ नहीं! आपके जीआरटी को आर्बिट्रम में सुरक्षित रूप से माइग्रेट कर दिया गया है और जब आप इसे पढ़ रहे हैं तो इसका उपयोग प्रश्नों के भुगतान के लिए किया जा रहा है। +Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. -## मुझे कैसे पता चलेगा कि मेरे फंड आर्बिट्रम में सुरक्षित रूप से माइग्रेट हो गए हैं? +## How do I know my funds have migrated securely to Arbitrum? सभी जीआरटी बिलिंग शेष पहले ही सफलतापूर्वक आर्बिट्रम में स्थानांतरित कर दिए गए हैं। आप आर्बिट्रम पर बिलिंग अनुबंध [यहां] [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a) देख सकते हैं। -## मुझे कैसे पता चलेगा कि आर्बिट्रम ब्रिज सुरक्षित है? +## How do I know the Arbitrum bridge is secure? The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. -## अगर मैं अपने एथेरियम मेननेट वॉलेट से ताजा जीआरटी जोड़ रहा हूं तो मुझे क्या करना होगा? +## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? आपके आर्बिट्रम बिलिंग बैलेंस में जीआरटी जोड़ना [सबग्राफ स्टूडियो] (https://thegraph.com/studio/) में एक-क्लिक अनुभव के साथ किया जा सकता है। आप आसानी से अपने जीआरटी को आर्बिट्रम से जोड़ सकेंगे और एक लेनदेन में अपनी एपीआई कुंजी भर सकेंगे। -कृपया अधिक विस्तृत निर्देशों के लिए [Billing page](/subgraphs/billing/) पर जाएं, जहाँ GRT जोड़ने, निकालने या प्राप्त करने के तरीके बताए गए हैं। +अधिक विस्तृत निर्देशों के लिए [ बिलिंग पेज](/subgraphs/billing/) पर जाएं कि GRT जोड़ने, निकालने या प्राप्त करने के लिए क्या करना है। From 2e49ff3f0f7daa833bd85556b93879ca726e69e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:42 -0500 Subject: [PATCH 0289/1534] New translations l2-transfer-tools-faq.mdx (French) --- .../arbitrum/l2-transfer-tools-faq.mdx | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-faq.mdx index 82442c2246ba..0f138e068e3b 100644 --- a/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ Ces outils vous obligeront à suivre un ensemble d'étapes spécifiques en fonct ### Puis-je utiliser le même portefeuille que celui que j'utilise sur le réseau principal Ethereum ? -Si vous utilisez un portefeuille [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), vous pouvez utiliser la même adresse. Si votre portefeuille du mainnet Ethereum est un contrat (par exemple un multisig), vous devez alors spécifier une [adresse du portefeuille Arbitrum](/archived/arbitrum/arbitrum-faq/#what-do-i-deed-to-do-to-use-the- graph-on-l2) où votre virement sera envoyé. Veuillez vérifier attentivement l'adresse car tout transfert vers une adresse incorrecte peut entraîner une perte permanente. Si vous souhaitez utiliser un multisig sur L2, veillez à déployer un contrat multisig sur Arbitrum One. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. 
a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Les portefeuilles sur les blockchains EVM comme Ethereum et Arbitrum sont une paire de clés (publiques et privées) que vous créez sans avoir besoin d'interagir avec la blockchain. Ainsi, tout portefeuille créé pour Ethereum fonctionnera également sur Arbitrum sans action supplémentaire. @@ -109,7 +109,7 @@ Les frais de gaz sur Arbitrum One sont payés en utilisant l'ETH bridgé (c'est- Pour transférer votre délégation, complétez les étapes suivantes : 1. Lancer un transfert de délégation sur le mainnet Ethereum -2. Attender 20 minutes pour la confirmation +2. Attendre 20 minutes pour une confirmation 3. Confirmer le transfert de délégation sur Arbitrum \*\*\*\*Vous devez confirmer la transaction pour compléter le transfert de la délégation sur Arbitrum. Cette étape doit être réalisée dans les 7 jours, sinon la délégation pourrait être perdue. Dans la plupart des cas, cette étape se déroule automatiquement, mais une confirmation manuelle peut être nécessaire en cas de hausse du prix du gaz sur Arbitrum. Si vous rencontrez des problèmes au cours de ce processus, il y aura des ressources pour vous aider : contactez le support à support@thegraph.com ou sur [Discord](https://discord.gg/graphprotocol). @@ -202,7 +202,7 @@ Pour transférer votre curation, vous devrez compléter les étapes suivantes : 2. Spécifiez une adresse de curateur L2\* -3. Attendez 20 minutes pour confirmer +3. Attendre 20 minutes pour une confirmation \*Si nécessaire, c'est-à-dire que vous utilisez une adresse contractuelle. @@ -234,7 +234,7 @@ Pour transférer votre participation, vous devrez suivre les étapes suivantes  1. Initier un transfert de participation sur Ethereum mainnet -2. Attendre 20 minutes pour confirmer +2. Attendre 20 minutes pour une confirmation 3. Confirmer le transfert de participation sur Arbitrum @@ -292,7 +292,7 @@ Pour transférer votre vesting, vous devez suivre les étapes suivantes : 1. Initier le transfert de vesting sur Ethereum mainnet -2. Attendre 20 minutes pour confirmer +2. Attendre 20 minutes pour une confirmation 3. Confirmer le transfert de vesting dans Arbitrum @@ -323,7 +323,7 @@ Pour ceux qui ont acquis la totalité de leurs droits, le processus est similair 3. Envoyez votre participation/délégation à L2 via les fonctions de l'outil de transfert « verrouillées » dans le contrat de Staking L1. -4. Retirez tout ETH restant du contrat de l'outil de transfert +4. Retirer tout ETH restant du contrat d’outil de transfert ### Puis-je transférer mon contrat de vesting à Arbitrum ? @@ -339,11 +339,13 @@ Si vous n’avez transféré aucun solde de contrat de vesting à L2 et que votr ### J’utilise mon contrat de vesting pour investir dans mainnet. Puis-je transférer ma participation à Arbitrum? -Oui, mais si votre contrat est toujours acquis, vous ne pouvez transférer la participation que pour qu’elle soit détenue par votre contrat d’acquisition L2. Vous devez d’abord initialiser ce contrat L2 en transférant un solde de GRT à l’aide de l’outil de transfert de contrat d’acquisition dans Explorer. 
Si votre contrat est entièrement acquis, vous pouvez transférer votre participation à n’importe quelle adresse en L2, mais vous devez le définir au préalable et déposer des GRT pour l’outil de transfert L2 pour payer le gaz L2. +Oui, mais si votre contrat est toujours acquis, vous ne pouvez transférer la participation que pour qu’elle soit détenue par votre contrat d’acquisition L2. Vous devez d’abord initialiser ce contrat L2 en transférant un solde de GRT à l’aide de l’outil de transfert de contrat d’acquisition dans Explorer. Si +votre contrat est entièrement acquis, vous pouvez transférer votre participation à n’importe quelle adresse en L2, mais vous devez le définir au préalable et déposer des GRT pour l’outil de transfert L2 pour payer le gaz L2. ### J’utilise mon contrat de vesting pour déléguer sur mainnet. Puis-je transférer mes délégations à Arbitrum? -Oui, mais si votre contrat est toujours acquis, vous ne pouvez transférer la participation que pour qu’elle soit détenue par votre contrat de vesting L2. Vous devez d’abord initialiser ce contrat L2 en transférant un solde de GRT à l’aide de l’outil de transfert de contrat de vesting dans Explorer. Si votre contrat est entièrement acquis, vous pouvez transférer votre participation à n’importe quelle adresse en L2, mais vous devez le définir au préalable et déposer des GRT pour l’outil de transfert L2 pour payer le gaz L2. +Oui, mais si votre contrat est toujours acquis, vous ne pouvez transférer la participation que pour qu’elle soit détenue par votre contrat de vesting L2. Vous devez d’abord initialiser ce contrat L2 en transférant un solde de GRT à l’aide de l’outil de transfert de contrat de vesting dans Explorer. Si +votre contrat est entièrement acquis, vous pouvez transférer votre participation à n’importe quelle adresse en L2, mais vous devez le définir au préalable et déposer des GRT pour l’outil de transfert L2 pour payer le gaz L2. ### Puis-je spécifier un bénéficiaire différent pour mon contrat de vesting sur L2? From 8b288195b0f0338badbd14fb059570e13a05d07d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:44 -0500 Subject: [PATCH 0290/1534] New translations l2-transfer-tools-faq.mdx (Arabic) --- .../src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx index 07cfffe96b4c..9c949027b41f 100644 --- a/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ These tools will require you to follow a specific set of steps depending on what ### هل يمكنني استخدام نفس المحفظة التي استخدمها في Ethereum mainnet؟ -إذا كنت تستخدم محفظة [EOA] (https://ethereum.org/en/developers/docs/accounts/#types-of-account) ، فيمكنك استخدام نفس العنوان. إذا كانت محفظة Ethereum mainnet الخاصة بك عبارة عن عقد (مثل multisig) ، فيجب عليك تحديد [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) حيث سيتم إرسال التحويل الخاص بك. يرجى التحقق من العنوان بعناية لأن أي تحويلات إلى عنوان غير صحيح يمكن أن تؤدي إلى خسارة غير قابلة للرجوع. إذا كنت ترغب في استخدام multisig على L2 ، فتأكد من نشر عقد multisig على Arbitrum One. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. 
If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. From 4b9fa1061301727d0eef6fa5d49362fcb047fc9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:45 -0500 Subject: [PATCH 0291/1534] New translations l2-transfer-tools-faq.mdx (Czech) --- .../src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx index c7f6fe044e26..88e1d9e632a2 100644 --- a/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ Tyto nástroje vyžadují, abyste provedli určitý soubor kroků v závislosti ### Mohu používat stejnou peněženku, kterou používám na mainnet Ethereum? -Pokud používáte peněženku [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), můžete použít stejnou adresu. Pokud je vaše peněženka Ethereum mainnet kontraktem (např. multisig), musíte zadat adresu [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2), kam bude váš převod odeslán. Pečlivě zkontrolujte adresu, protože převody na nesprávnou adresu mohou mít za následek trvalou ztrátu. Pokud chcete používat multisig na L2, ujistěte se, že jste nasadili multisig smlouvu na Arbitrum One. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Peněženky na blockchainech EVM, jako jsou Ethereum a Arbitrum, představují dvojici klíčů (veřejný a soukromý), které vytvoříte bez nutnosti interakce s blockchainem. Takže jakákoli peněženka vytvořená pro Ethereum bude fungovat i pro Arbitrum, aniž byste museli dělat cokoli jiného. 
From 9cedbab56ba59edda05a77b2cbd4e31545b218b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:46 -0500 Subject: [PATCH 0292/1534] New translations l2-transfer-tools-faq.mdx (German) --- .../src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx index 86d1c403d92f..e99eb29ec0d5 100644 --- a/website/src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/de/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ Für diese Tools müssen Sie eine Reihe von Schritten befolgen, je nachdem, welc ### Kann ich dieselbe Wallet verwenden, die ich im Ethereum Mainnet benutze? -Wenn Sie eine [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) Wallet verwenden, können Sie dieselbe Adresse verwenden. Wenn Ihr Ethereum Mainnet Wallet ein Kontrakt ist (z.B. ein Multisig), dann müssen Sie eine [Arbitrum Wallet Adresse](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) angeben, an die Ihr Transfer gesendet wird. Bitte überprüfen Sie die Adresse sorgfältig, da Überweisungen an eine falsche Adresse zu einem dauerhaften Verlust führen können. Wenn Sie einen Multisig auf L2 verwenden möchten, stellen Sie sicher, dass Sie einen Multisig-Vertrag auf Arbitrum One einsetzen. +Wenn Sie eine [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account)-Wallet verwenden, können Sie dieselbe Adresse verwenden. Wenn Ihre Ethereum Mainnet Wallet ein Vertrag ist (z.B. eine Multisig), dann müssen Sie eine [Arbitrum Wallet Adresse](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) angeben, an die Ihr Transfer gesendet wird. Bitte überprüfen Sie die Adresse sorgfältig, da Überweisungen an eine falsche Adresse zu einem dauerhaften Verlust führen können. Wenn Sie einen Multisig auf L2 verwenden möchten, stellen Sie sicher, dass Sie einen Multisig-Vertrag auf Arbitrum One einsetzen. Wallets auf EVM-Blockchains wie Ethereum und Arbitrum bestehen aus einem Paar von Schlüsseln (öffentlich und privat), die Sie erstellen, ohne mit der Blockchain interagieren zu müssen. Jede Wallet, die für Ethereum erstellt wurde, funktioniert also auch auf Arbitrum, ohne dass Sie etwas anderes tun müssen. @@ -64,7 +64,7 @@ Die Übertragungszeit beträgt etwa 20 Minuten. Die Arbitrum-Brücke arbeitet im ### Wird mein Subgraph noch auffindbar sein, nachdem ich ihn auf L2 übertragen habe? -Ihr Subgraph ist nur in dem Netzwerk auffindbar, in dem er veröffentlicht ist. Wenn Ihr Subgraph zum Beispiel auf Arbitrum One ist, können Sie ihn nur im Explorer auf Arbitrum One finden und nicht auf Ethereum. Bitte vergewissern Sie sich, dass Sie Arbitrum One in der Netzwerkumschaltung oben auf der Seite ausgewählt haben, um sicherzustellen, dass Sie sich im richtigen Netzwerk befinden. Nach der Übertragung wird der L1-Subgraph als veraltet angezeigt. +Ihr Subgraph ist nur in dem Netzwerk auffindbar, in dem er veröffentlicht ist. Wenn Ihr Subgraph zum Beispiel auf Arbitrum One ist, können Sie ihn nur im Explorer auf Arbitrum One finden und nicht auf Ethereum. Bitte vergewissern Sie sich, dass Sie Arbitrum One in der Netzwerkumschaltung oben auf der Seite ausgewählt haben, um sicherzustellen, dass Sie sich im richtigen Netzwerk befinden. Nach der Übertragung wird der L1-Subgraph als veraltet angezeigt. 
### Muss mein Subgraph ( Teilgraph ) veröffentlicht werden, um ihn zu übertragen? From ff9534a1794164a5fe6dd90485d69ceb77e09aa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:47 -0500 Subject: [PATCH 0293/1534] New translations l2-transfer-tools-faq.mdx (Italian) --- .../src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx index 780a8eb1a062..bc5a9ac711c5 100644 --- a/website/src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/it/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -2,7 +2,7 @@ title: L2 Transfer Tools FAQ --- -## Generale +## General ### Cosa sono gli Strumenti di Trasferimento L2? @@ -14,7 +14,7 @@ These tools will require you to follow a specific set of steps depending on what ### Posso usare lo stesso wallet utilizzato su Ethereum mainnet? -Se stai usando un wallet [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), puoi utilizzare lo stesso address. Se il tuo wallet su Ethereum mainnet è un contratto (ad esempio un multisig), allora dovrai specificare un [wallet address Arbitrum](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) a cui verrà inviato il trasferimento. Controlla attentamente l'address poiché qualsiasi trasferimento a un indirizzo errato può comportare una perdita permanente. Se desideri utilizzare un multisig su L2, assicurati di deployare un contratto multisig su Arbitrum One. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. From f996e06666e368ef0425170f3cacca6bb12de622 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:48 -0500 Subject: [PATCH 0294/1534] New translations l2-transfer-tools-faq.mdx (Japanese) --- .../src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx index ec2e400a7cb9..70999970ca9a 100644 --- a/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ The Graphは、プロトコルをArbitrum Oneに展開することで、コン ### イーサリアムメインネットで使用しているのと同じウォレットを使用できますか? 
-[EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) ウォレットを使用している場合は、同じアドレスを使用できます。 Ethereum メインネット ウォレットがコントラクト (マルチシグなど) の場合、[Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the- graph-on-l2) 転送が送信される場所。 間違ったアドレスへの転送は永久的な損失につながる可能性があるため、アドレスを注意深く確認してください。 L2 でマルチシグを使用したい場合は、必ず Arbitrum One にマルチシグ コントラクトを展開してください。 +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. EthereumやArbitrumのようなEVMブロックチェーン上のウォレットは、ブロックチェーンとやり取りすることなく作成できる一対の鍵(公開鍵と秘密鍵)です。そのため、イーサリアム用に作成されたウォレットは、Arbitrum上でも何もすることなく動作します。 From 2102e3b932126a58513212b41d9c08183dfc3df9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:49 -0500 Subject: [PATCH 0295/1534] New translations l2-transfer-tools-faq.mdx (Korean) --- .../pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx index 86c77a2ed290..d9ab903eac38 100644 --- a/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ko/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ These tools will require you to follow a specific set of steps depending on what ### 이더리움 메인넷에서 사용하는 지갑을 그대로 사용할 수 있나요? -당신이 사용하는 경우 [EOA] (https://ethereum.org/ko/개발자/문서/계정/#계정-유형) 만약 당신의 이더리움 메인넷 지갑이 단순한 지갑이라면, 당신은 동일한 주소를 사용할 수 있습니다. 만약 당신의 이더리움 메인넷 지갑이 계약(예: 다중 서명 지갑)인 경우, 당신은 당신의 이체가 전송될 Arbitrum 지갑 주소: /archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2를 지정해야 합니다. 잘못된 주소로의 이체는 영구적인 손실을 초래할 수 있으므로 주소를 주의깊게 확인하십시오. 만약 당신이 L2에서 다중 서명 지갑을 사용하고 싶다면, 반드시 Arbitrum One에 다중 서명 계약을 배포하십시오. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. @@ -22,7 +22,8 @@ The exception is with smart contract wallets like multisigs: these are smart con ### 만약 7일 안에 이체를 완료하지 못하면 어떻게 되나요? -L2 전송 도구는 Arbitrum의 기본 메커니즘을 사용하여 L1에서 L2로 메시지를 보냅니다. 이 메커니즘은 "재시도 가능한 티켓"이라고 하며 Arbitrum GRT 브리지를 포함한 모든 네이티브 토큰 브리지를 사용하여 사용됩니다. 재시도 가능한 티켓에 대해 자세히 읽을 수 있습니다 [Arbitrum 문서] (https://docs.arbitrum.io/arbos/l1-to-l2-messaging). +L2 전송 도구는 Arbitrum의 기본 메커니즘을 사용하여 L1에서 L2로 메시지를 보냅니다. 
이 메커니즘은 "재시도 가능한 티켓"이라고 하며 Arbitrum GRT 브리지를 포함한 모든 네이티브 토큰 브리지를 사용하여 사용됩니다. 재시도 가능한 티켓에 대해 자세히 읽을 수 있습니다 [Arbitrum 문서] +(https://docs.arbitrum.io/arbos/l1-to-l2-messaging). 자산(하위 그래프, 스테이크, 위임 또는 큐레이션) 을 L2로 이전하면 L2에서 재시도 가능한 티켓을 생성하는 Arbitrum GRT 브리지를 통해 메시지가 전송됩니다. 전송 도구에는 거래에 일부 ETH 값이 포함되어 있으며, 이는 1) 티켓 생성 비용을 지불하고 2) L2에서 티켓을 실행하기 위해 가스 비용을 지불하는 데 사용됩니다. 그러나 티켓이 L2에서 실행될 준비가 될 때까지 가스 가격이 시간에 따라 달라질 수 있으므로 이 자동 실행 시도가 실패할 수 있습니다. 그런 일이 발생하면 Arbitrum 브릿지는 재시도 가능한 티켓을 최대 7일 동안 유지하며 누구나 티켓 "사용"을 재시도할 수 있습니다(Arbitrum에 브릿지된 일부 ETH가 있는 지갑이 필요함). @@ -40,6 +41,8 @@ If you have the L1 transaction hash (which you can find by looking at the recent + + 1. 이더리움 메인넷에서 전송 시작 2. 확인을 위해 20분 정도 기다리세요 From 9eca09326f71011fdbf73c976f2d844fdddc29c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:51 -0500 Subject: [PATCH 0296/1534] New translations l2-transfer-tools-faq.mdx (Polish) --- .../src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx index df0e6e747df9..c7f851bd8d87 100644 --- a/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ These tools will require you to follow a specific set of steps depending on what ### Czy mogę używać tego samego portfela, którego używam w mainnecie Ethereum? -Jeśli korzystasz z portfela [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), możesz użyć tego samego adresu. Jeśli portfel głównej sieci Ethereum jest kontraktem (np. multisig), musisz podać [Adres portfela Arbitrum](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-use-the-graph-on-l2), na który zostanie dokonany przelew. Prosimy o dokładne sprawdzenie adresu, ponieważ wszelkie przelewy na nieprawidłowy adres mogą spowodować ich trwałą utratę. Jeśli chcesz korzystać z multisig na L2, upewnij się, że wdrożyłeś kontrakt multisig na Arbitrum One. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. 
From b41f0c41816d8233123d71d19ea86fe0119240ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:52 -0500 Subject: [PATCH 0297/1534] New translations l2-transfer-tools-faq.mdx (Portuguese) --- .../pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx index feb9264ebc45..d542d643adc4 100644 --- a/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ Estas ferramentas exigem aderência a um conjunto específico de passos a depend ### Posso usar a mesma carteira que uso na mainnet do Ethereum? -Se usa uma carteira [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), você poderá usar o mesmo endereço. Se a sua carteira na mainnet do Ethereum for um contrato (uma multisig, por ex.), então deve ser especificado um [endereço de carteira no Arbitrum](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) para onde a sua transferência será enviada. Por favor, tenha cuidado ao conferir o endereço, pois transferir a um endereço errado pode causar prejuízos permanentes. Se quiser usar uma multisig na L2, lance um contrato multisig no Arbitrum One. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Carteiras em blockchains EVM como Ethereum e Arbitrum são um par de chaves (pública e privada), que você cria sem precisar interagir com a blockchain. Qualquer carteira criada para o Ethereum também funcionará no Arbitrum sem ter que fazer qualquer outra coisa. @@ -317,13 +317,13 @@ Para transferir o seu vesting, complete os seguintes passos: Para quem estiver totalmente vestido, o processo não é muito diferente: -1. Deposite ETH no contrato da ferramenta de transferência (a interface pode estimar uma quantidade razoável) +1. Deposite ETH no contrato da ferramenta de transferência (a interface pode ajudar a estimar uma quantia razoável) 2. Determine o seu endereço na L2 com uma chamada ao contrato da ferramenta de transferência 3. Envie o seu stake/a sua delegação à L2 através das funções "bloqueadas" da ferramenta de transferência no contrato de staking na L1. -4. Saque qualquer quantia de ETH que restar do contrato da ferramenta de transferência +4. Saque qualquer quantia restante de ETH do contrato da ferramenta de transferência ### Posso transferir o meu contrato de vesting ao Arbitrum? 
From 1b1b884a15fc278c73641c7598db700d752591d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:53 -0500 Subject: [PATCH 0298/1534] New translations l2-transfer-tools-faq.mdx (Russian) --- .../src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx index 1ff945a642d7..ebb1f3b1b165 100644 --- a/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ The Graph удешевил участие контрибьюторов в сет ### Могу ли я использовать тот же кошелек, что и в основной сети Ethereum? -Если Вы используете кошелек [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), Вы можете использовать тот же адрес. Если Ваш кошелек основной сети Ethereum является контрактным (например, кошелек с мультиподписью), то Вы должны указать адрес [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2), на который будет отправлен Ваш перевод. Пожалуйста, внимательно проверяйте адрес, так как перевод на неправильный адрес может привести к необратимой потере средств. Если Вы хотите использовать кошелек с мультиподписью на L2, убедитесь, что развернули multisig-контракт на Arbitrum One. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Кошельки на блокчейнах EVM, таких как Ethereum и Arbitrum, представляют собой пару ключей (публичный и приватный), которые Вы создаете без необходимости взаимодействия с блокчейном. Таким образом, любой кошелек, созданный для Ethereum, также будет работать на Arbitrum без необходимости выполнения дополнительных действий. From ffd54f6b2fafcee298814691e17b6e60c6ebeded Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:55 -0500 Subject: [PATCH 0299/1534] New translations l2-transfer-tools-faq.mdx (Swedish) --- .../src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx index 3dfe0794d1f4..b158efaed6ff 100644 --- a/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ Dessa verktyg kommer att kräva att du följer en specifik uppsättning steg ber ### Kan jag använda samma plånbok som jag använder på Ethereum huvudnätet? -Om du använder en [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account)-plånbok kan du använda samma adress. Om din Ethereum huvudnät plånbok är en kontrakt (t.ex. 
en multisig) måste du specificera en [Arbitrum plånboksadress](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) där din överföring kommer att skickas. Kontrollera adressen noggrant eftersom felaktiga överföringar till felaktig adress kan resultera i permanent förlust. Om du vill använda en multisig på L2 måste du se till att du implementerar en multisig-kontrakt på Arbitrum One. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. From 7095130a9608a51b6f38a6d68f12e7d238146c9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:56 -0500 Subject: [PATCH 0300/1534] New translations l2-transfer-tools-faq.mdx (Turkish) --- .../src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx index 71dddb2e9dc2..709689c6ca55 100644 --- a/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ Bu araçlar, Graph içindeki rolünüzün ne olduğuna ve Katman2'ye ne transfer ### Ethereum ana ağında kullandığım aynı cüzdanı kullanabilir miyim? -Eğer bir [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) (Harici Olarak Sahip Olunan Hesap) cüzdanı kullanıyorsanız aynı adresi kullanabilirsiniz. Ethereum ana ağ cüzdanınız bir sözleşme ise (örneğin bir çoklu imza), transferinizin gönderileceği bir [Arbitrum cüzdan adresi] (/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) belirtmelisiniz. Yanlış bir adrese yapılan transferler kalıcı kayıplara neden olabileceğinden lütfen adresi dikkatlice kontrol edin. Katman2'de bir çoklu imza cüzdanı kullanmak istiyorsanız, Arbitrum One'da bir çoklu imza sözleşmesi kurduğunuzdan emin olun. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Ethereum ve Arbitrum gibi EVM blok zincirlerindeki cüzdanlar, blok zinciri ile etkileşime girmenize gerek kalmadan oluşturduğunuz bir çift anahtardır (genel ve özel). 
Dolayısıyla, Ethereum için oluşturulan herhangi bir cüzdan, başka bir işlem yapmanıza gerek kalmadan Arbitrum üzerinde de çalışacaktır. From 8539d7c15c8071498b8f1d16aee13cc4490b8106 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:58 -0500 Subject: [PATCH 0301/1534] New translations l2-transfer-tools-faq.mdx (Chinese Simplified) --- .../pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx index 185afab40151..5ee091bbc5a3 100644 --- a/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -6,15 +6,15 @@ title: L2转移工具常见问题解答 ### 什么是L2转移工具? -The Graph 在将协议部署到 Arbitrum One 后,使参与者参与网络的成本降低了 26 倍。L2 转账工具是核心开发人员创建的,旨在简化转移到 Layer 2(二层网络)的过程。 +在将协议部署到 Arbitrum One 后,Graph 使参与者参与网络的成本降低了 26 倍。L2 转账工具是核心开发人员创建的,旨在简化转移到 Layer 2(二层网络)的过程。 对于每个网络参与者,都提供了一套L2转账工具,以实现无缝的L2转移体验,避免解冻期或手动提取和桥接GRT代币。 -这些工具将要求您根据您在 The Graph 中的角色以及您要转移到 Layer 2 的内容,遵循一套特定的步骤。 +这些工具将要求您根据您在 Graph 中的角色以及您要转移到 Layer 2 的内容,遵循一套特定的步骤。 -### 我可以在以太坊主网上使用相同的钱包吗? +### 我可以使用与以太坊主网上相同的钱包吗? -如果您使用的是 [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account)钱包,则可以使用相同的地址。如果您在以太坊主网上的钱包是智能合约钱包(例如多签钱包),那么您必须指定一个Arbitrum钱包地址,用于接收您的转账。请仔细检查地址,因为发送到错误地址的任何转账都可能导致永久丢失。如果您希望在L2上使用多签钱包,请确保在Arbitrum One上部署一个多签合约。 +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. 
在像以太坊和 Arbitrum 这样的 EVM 区块链上,钱包是一对密钥(公钥和私钥),您可以在不需要与区块链进行任何交互的情况下创建。因此,任何在以太坊上创建的钱包也将在 Arbitrum 上运作,而无需采取其他任何行动。 From 8829a86b231069910fa503d6345f35684339be55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:48:59 -0500 Subject: [PATCH 0302/1534] New translations l2-transfer-tools-faq.mdx (Urdu (Pakistan)) --- .../src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx index f56464a49a66..ce46b35ce79b 100644 --- a/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -15,7 +15,7 @@ These tools will require you to follow a specific set of steps depending on what ### کیا میں وہی والیٹ استعمال کر سکتا ہوں جو میں ایتھیریم مین نیٹ پر استعمال کرتا ہوں؟ -اگر آپ ایک [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) والیٹ استعمال کر رہے ہیں تو آپ وہی ایڈریس استعمال کر سکتے ہیں۔ اگر آپ کا ایتھیریم مین نیٹ والیٹ ایک کنٹریکٹ ہے (مثلاً ایک ملٹی سگ) تو آپ کو [آربٹرم والیٹ ایڈریس](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) کی وضاحت کرنا ہوگی جہاں آپ کی منتقلی بھیجی جائے گی۔ براہ کرم ایڈریس کو احتیاط سے چیک کریں کیونکہ کسی بھی غلط ایڈریس پر منتقلی کے نتیجے میں مستقل نقصان ہو سکتا ہے۔ اگر آپ L2 پر ملٹی سگ استعمال کرنا چاہتے ہیں، تو یقینی بنائیں کہ آپ Arbitrum One پر ملٹی سگ کنٹریکٹ تعینات کرتے ہیں. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. From e81c832bddd3faef8a5f7972c5f3d59bfa18c87d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:01 -0500 Subject: [PATCH 0303/1534] New translations l2-transfer-tools-faq.mdx (Marathi) --- .../src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx index 4d4a36b36f22..b6ee08a5bbed 100644 --- a/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ These tools will require you to follow a specific set of steps depending on what ### मी इथरियम मेननेटवर वापरतो तेच वॉलेट मी वापरू शकतो का? -आपल्याला [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) वॉलेट वापरत असल्यास, आपल्याला त्या एका आधीच्या पत्त्याचा वापर करू शकता. आपल्य्या इथे Ethereum मुख्यनेट वॉलेट कंट्रॅक्ट असल्यास (उदा. 
मल्टीसिग), तर आपल्याला आपल्या स्थानांतरणाच्या लक्ष्यासाठी [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) सूचित करावा लागेल. कृपया पत्त्याची वाचन सावध घ्या कारण कोणत्याही चुकीच्या पत्त्याला स्थायी नुकसान होऊ शकतो. आपल्याला L2 वर मल्टीसिग वापरायचं असल्यास, कृपया सुनिश्चित करा की आपल्याला Arbitrum One वर मल्टीसिग कॉन्ट्रॅक्ट डिप्लॉय करण्याची आवश्यकता आहे. +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. From 9c4edc6c828ac5c57cddee7415e2d4b955b6e200 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:02 -0500 Subject: [PATCH 0304/1534] New translations l2-transfer-tools-faq.mdx (Hindi) --- .../src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx index 4f0af9de22ba..66574cb53dd4 100644 --- a/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -14,7 +14,7 @@ These tools will require you to follow a specific set of steps depending on what ### Can I use the same wallet I use on Ethereum mainnet? -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +यदि आप [ EOA ] (https://ethereum.org/en/developers/docs/accounts/#types-of-account) वॉलेट का उपयोग कर रहे हैं, तो आप उसी पते का उपयोग कर सकते हैं। यदि आपका Ethereum mainnet वॉलेट एक contract है (जैसे कि एक multisig), तो आपको एक [Arbitrum बटुआ पता ](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) निर्दिष्ट करना होगा जहाँ आपका ट्रांसफर भेजा जाएगा। कृपया पते को ध्यानपूर्वक जांचें, क्योंकि गलत पते पर ट्रांसफर करने से स्थायी हानि हो सकती है। यदि आप L2 पर multisig का उपयोग करना चाहते हैं, तो सुनिश्चित करें कि आपने Arbitrum One पर एक multisig contract तैनात किया हो। Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. 
From d013506b3699bfe5815a64fd418b3d6eaea389cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:04 -0500 Subject: [PATCH 0305/1534] New translations l2-transfer-tools-guide.mdx (French) --- .../pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx index 290fa2d90cff..6d59607442b4 100644 --- a/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/fr/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Guide des outils de transfert L2 The Graph a facilité le passage à L2 sur Arbitrum One. Pour chaque participant au protocole, il existe un ensemble d'outils de transfert L2 permettant de rendre le transfert vers L2 transparent pour tous les participants du réseau. Ces outils vous obligeront à suivre un ensemble d’étapes spécifiques en fonction de ce que vous transférez. -Certaines questions fréquentes sur ces outils trouvent leur réponse dans la [FAQ sur les outils de transfert L2](/archived/arbitrum/l2-transfer-tools-faq/). Les FAQ contiennent des explications détaillées sur la façon d'utiliser les outils, leur fonctionnement et les éléments à garder à l'esprit lors de leur utilisation. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Comment transférer votre subgraph vers Arbitrum (L2) @@ -148,7 +148,7 @@ Démarrage du transfert : Une fois que vous avez commencé le transfert, le message qui envoie votre curation L1 à L2 doit se propager à travers le pont Arbitrum. Cela prend environ 20 minutes (le pont attend que le bloc du mainnet contenant la transaction soit "à l'abri" d'une éventuelle réorganisation de la chaîne). -Une fois ce temps d'attente passé, Arbitrum tentera d'exécuter automatiquement le transfert sur les contrats L2. +Une fois ce temps d'attente terminé, le réseau Arbitrum tentera d'exécuter automatiquement le transfert sur les contrats L2. ![Envoi du signal de curation à L2](/img/sendingCurationToL2Step2Second.png) @@ -156,7 +156,7 @@ Une fois ce temps d'attente passé, Arbitrum tentera d'exécuter automatiquement Dans la plupart des cas, cette étape s'exécutera automatiquement car le gaz L2 inclus dans l'étape 1 devrait être suffisant pour exécuter la transaction qui reçoit la curation sur les contrats Arbitrum. Dans certains cas, cependant, il est possible qu'une flambée des prix du gaz sur Arbitrum fasse échouer cette exécution automatique. Dans ce cas, le « ticket » qui envoie votre curation vers L2 sera en attente et nécessitera une nouvelle tentative sous 7 jours. -Si c'est le cas, vous devrez vous connecter en utilisant un portefeuille L2 qui a quelques ETH sur Arbitrum, changer le réseau de votre portefeuille pour Arbitrum, et cliquer sur "Confirmer le transfert" pour réessayer la transaction. +Si c'est le cas, vous devrez vous connecter en utilisant un portefeuille L2 qui contient de l'ETH sur Arbitrum, changer le réseau de votre portefeuille vers Arbitrum, et cliquer sur "Confirmer le transfert" pour retenter la transaction. 
![Envoyer un signal à L2](/img/L2TransferToolsFinalCurationImage.png) From 84e64edf4ca0733483452510342de91267003f4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:05 -0500 Subject: [PATCH 0306/1534] New translations l2-transfer-tools-guide.mdx (Spanish) --- .../src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx index 9b4199f25eca..4ec61fdc3a7c 100644 --- a/website/src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/es/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Guía de las Herramientas de Transferencia a L2 The Graph ha facilitado la migración a L2 en Arbitrum One. Para cada participante del protocolo, existen un conjunto de herramientas de transferencia a L2 para que la migración sea fluida para todos los participantes de la red. Estas herramientas requerirán que sigas un conjunto específico de pasos dependiendo de lo que estés transfiriendo. -Algunas preguntas frecuentes sobre estas herramientas se responden en las [Preguntas Frecuentes de las Herramientas de Transferencia a L2](/archived/arbitrum/l2-transfer-tools-faq/). Las preguntas frecuentes contienen explicaciones detalladas sobre cómo utilizar las herramientas, cómo funcionan y aspectos a tener en cuenta al usarlas. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Cómo transferir tu subgrafo a Arbitrum (L2) From eaa8fbf220d09ada238f6cd67c898842cdd97c50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:06 -0500 Subject: [PATCH 0307/1534] New translations l2-transfer-tools-guide.mdx (Arabic) --- .../src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx index bf05f92ef144..af5a133538d6 100644 --- a/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: L2 Transfer Tools Guide يسهل الغراف الانتقال إلى الطبقة الثانبة على أربترم. لكل مشارك في البروتوكول ، توجد مجموعة من أدوات نقل الطبقة الثانبة لجعل النقل إليها سلسًا لجميع المشاركين في الشبكة. ستطلب منك هذه الأدوات اتباع مجموعة محددة من الخطوات بناءً على ما تقوم بنقله. -بعض الأسئلة المتكررة حول هذه الأدوات تمت الإجابة عليها في [الأسئلة الشائعة حول أدوات نقل الطبقة الثانية] (/archived/arbitrum/l2-transfer-tools-faq/). تحتوي الأسئلة الشائعة على تفسيرات متعمقة لكيفية استخدام الأدوات وكيفية عملها والأمور التي يجب وضعها في الاعتبار عند إستخدامها. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. 
## كيف تنقل الغراف الفرعي الخاص بك إلى شبكة آربترم (الطبقة الثانية) From 17eb6cdf1a412a5bf0a9887c11f48e148f92b411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:08 -0500 Subject: [PATCH 0308/1534] New translations l2-transfer-tools-guide.mdx (Czech) --- .../src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx index 5acbf6e437d5..69717e46ed39 100644 --- a/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/cs/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Průvodce nástroji pro přenos L2 Graph usnadnil přechod na úroveň L2 v Arbitrum One. Pro každého účastníka protokolu je k dispozici sada nástrojů pro přechod na L2, které umožňují bezproblémový přechod na L2 pro všechny účastníky sítě. Tyto nástroje vyžadují provedení určitého souboru kroků v závislosti na tom, co přenášíte. -Některé časté otázky týkající se těchto nástrojů jsou zodpovězeny v sekci [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). Často kladené dotazy obsahují podrobné vysvětlení, jak nástroje používat, jak fungují a na co je třeba při jejich používání pamatovat. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Jak přenést podgraf do Arbitrum (L2) From c1bc9e34172cd22a77c9237079bf7c166a1d3ab0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:09 -0500 Subject: [PATCH 0309/1534] New translations l2-transfer-tools-guide.mdx (German) --- .../src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx index 8c1b310480c3..6a5b13da53d7 100644 --- a/website/src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/de/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -6,7 +6,7 @@ The Graph hat den Wechsel zu L2 auf Arbitrum One leicht gemacht. Für jeden Prot Einige häufig gestellte Fragen zu diesen Tools werden in den [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/) beantwortet. Die FAQs enthalten ausführliche Erklärungen zur Verwendung der Tools, zu ihrer Funktionsweise und zu den Dingen, die bei ihrer Verwendung zu beachten sind. 
-## So übertragen Sie Ihren Untergraphen auf Arbitrum (L2) +## So übertragen Sie Ihren Subgraphen auf Arbitrum (L2) From a25e6571340d889eaeb4408b282478de2775680d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:10 -0500 Subject: [PATCH 0310/1534] New translations l2-transfer-tools-guide.mdx (Japanese) --- .../src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx index a380e0b56fdd..b77261989131 100644 --- a/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ja/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: L2 転送ツールガイド グラフにより、アービトラムワンのL2に簡単に移動できるようになりました。プロトコル参加者ごとに、すべてのネットワーク参加者が L2 への転送をシームレスに行うための L2 転送ツールのセットがあります。これらのツールでは、転送する内容に応じて、特定の一連の手順に従う必要があります。 -これらのツールに関するよくある質問は、[L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/) で回答されています。FAQには、ツールの使用方法、機能、およびツールを使用する際の注意事項に関する詳細な説明が含まれています。 +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## サブグラフをアービトラムに転送する方法 (L2) From 357c0c4c33dc0723ce411152c13ba700dba8a469 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:12 -0500 Subject: [PATCH 0311/1534] New translations l2-transfer-tools-guide.mdx (Dutch) --- .../pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx index 5453e26b1aaa..67a7011010e7 100644 --- a/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/nl/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Gids voor L2 Transfer Tools The Graph heeft het eenvoudig gemaakt om naar L2 op Arbitrum One over te stappen. Voor elke deelnemer aan het protocol is er een set L2 Transfer Tools ontwikkeld om het overzetten van GRT naar L2 makkelijk te maken voor alle netwerkdeelnemers. Deze tools vereisen dat u een specifieke reeks stappen volgt, afhankelijk van wat u overdraagt. -Sommige veelgestelde vragen over deze tools worden beantwoord in de [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). De FAQs bevatten diepgaande uitleg over hoe de tools te gebruiken, hoe ze werken, en punten om rekening mee to houden bij het gebruiken ervan. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Hoe zet je je subgraph over naar Arbitrum (L2) @@ -156,7 +156,7 @@ Zodra deze wachttijd voorbij is, zal Arbitrum proberen de transfer automatisch u In de meeste gevallen zal deze stap automatisch worden uitgevoerd aangezien de L2 gas kosten die bij stap 1 zijn inbegrepen, voldoende zouden moeten zijn om de transactie die de curatie op de Arbitrum contracten ontvangt, uit te voeren. In sommige gevallen kan het echter zo zijn dat een piek in de gasprijzen op Arbitrum ervoor zorgt dat deze automatische uitvoering mislukt. 
In dat geval zal het "ticket" dat je curatie naar L2 stuurt, in behandeling blijven en is nodig het binnen 7 dagen nogmaals te proberen. -Als dit het geval is, moet je verbinding maken met een L2 wallet die wat ETH op Arbitrum heeft, je wallet netwerk naar Arbitrum overschakelen en op "Bevestig Transfer" klikken op de transactie opnieuw te proberen. +Als dit het geval is, moet je verbinding maken met een L2 wallet die wat ETH op Arbitrum heeft, je walletnetwerk naar Arbitrum overschakelen en op "Bevestig Transfer" klikken op de transactie opnieuw te proberen. ![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) From a4df8bd1dcfbefc721f5bee6f10d6ea50fa56c4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:13 -0500 Subject: [PATCH 0312/1534] New translations l2-transfer-tools-guide.mdx (Polish) --- .../src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx index c52407a4ad49..2e4e4050450e 100644 --- a/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/pl/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Przewodnik po narzędziach przesyłania L2 Graph ułatwił przeniesienie danych do L2 na Arbitrum One. Dla każdego uczestnika protokołu dostępny jest zestaw narzędzi do przesyłania na L2, dzięki którym przeniesienie do L2 jest bezproblemowe dla wszystkich uczestników sieci. Narzędzia te wymagają wykonania określonego zestawu kroków w zależności od przenoszonych danych. -Odpowiedzi na najczęściej zadawane pytania dotyczące tych narzędzi znajdują się w [Narzędzia przesyłania L2 FAQ](/archived/arbitrum/l2-transfer-tools-faq/). Odpowiedzi na najczęściej zadawane pytania zawierają szczegółowe wyjaśnienia dotyczące korzystania z narzędzi, ich działania i rzeczy, o których należy pamiętać podczas korzystania z nich. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Jak przenieść swój subgraph do Arbitrum (L2) From 755533f277f2a9f79e53102b96df1be841f0a524 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:14 -0500 Subject: [PATCH 0313/1534] New translations l2-transfer-tools-guide.mdx (Portuguese) --- .../src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx index 6b71561af98e..a6a744aeeb19 100644 --- a/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/pt/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Guia das Ferramentas de Transferência para L2 O The Graph facilitou muito o processo de se mudar para a L2 no Arbitrum One. Para cada participante no protocolo, há um conjunto de Ferramentas de Transferência para L2 que suavizam o processo para todos os participantes na rede. Estas ferramentas exigem que você siga um conjunto específico de passos, dependente no que você transferir. 
-Algumas perguntas frequentes sobre estas ferramentas são respondidas nas [Perguntas Frequentes das Ferramentas de Transferência para L2](/archived/arbitrum/l2-transfer-tools-faq/). As Perguntas Frequentes contém explicações profundas sobre como usar as ferramentas, como elas funcionam, e coisas a lembrar ao usá-las. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Como transferir o seu subgraph ao Arbitrum (L2) From 688c93d28384a3605666dc092985245a41956b29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:15 -0500 Subject: [PATCH 0314/1534] New translations l2-transfer-tools-guide.mdx (Russian) --- .../src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx index cc2fefe3739c..1dc689d934d3 100644 --- a/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ru/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Руководство по инструментам переноса L2 The Graph упростил переход на L2 в Arbitrum One. Для каждого участника протокола существует набор инструментов переноса L2, чтобы сделать переход на L2 бесшовным для всех участников сети. Эти инструменты потребуют от Вас выполнения определенного набора шагов в зависимости от того, что Вы передаете. -Ответы на некоторые частые вопросы об этих инструментах можно найти в [Часто задаваемые вопросы по инструментам переноса L2](/archived/arbitrum/l2-transfer-tools-faq/). Часто задаваемые вопросы содержат подробные объяснения того, как использовать инструменты, как они работают и что следует учитывать при их использовании. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Как перенести свой субграф в Arbitrum (L2) From 29871346f6abb6b3636b59166704c6a59dc7849c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:16 -0500 Subject: [PATCH 0315/1534] New translations l2-transfer-tools-guide.mdx (Swedish) --- .../src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx index be282774127d..4dde699e5079 100644 --- a/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/sv/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: L2 Guide för överföringsverktyg The Graph har gjort det enkelt att flytta till L2 på Arbitrum One. För varje protokolldeltagare finns det en uppsättning L2 överföringsverktyg som gör överföringen till L2 sömlös för alla nätverksdeltagare. Dessa verktyg kräver att du följer en specifik uppsättning steg beroende på vad du överför. -Några vanliga frågor om dessa verktyg besvaras i [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). 
De vanliga frågorna innehåller djupgående förklaringar av hur du använder verktygen, hur de fungerar och saker att tänka på när du använder dem. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Så här överför du din subgraf till Arbitrum (L2) From 5e81129774e2281cc1e78625cf1e968252f2f8f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:17 -0500 Subject: [PATCH 0316/1534] New translations l2-transfer-tools-guide.mdx (Turkish) --- .../src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx index 74c3180a86bc..15b3bfb1004e 100644 --- a/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/tr/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Katman2 Transfer Araçları Rehberi Graph, Arbitrum One üzerinde Katman2'ye geçişi kolaylaştırmıştır. Her protokol katılımcısı için, tüm ağ katılımcıları adına Katman2'ye transferi sorunsuz hale getirmek için bir dizi Katman2 Transfer Aracı vardır. Bu araçlar, ne transfer ettiğinize bağlı olarak belirli bir dizi adımı izlemenizi gerektirecektir. -Bu araçlarla ilgili sıkça sorulan bazı sorular [Katman2 Transfer Araçları SSS](/archived/arbitrum/l2-transfer-tools-faq/) bölümünde yanıtlanmaktadır. SSS, araçların nasıl kullanılacağı, nasıl çalıştıkları ve kullanırken akılda tutulması gerekenler hakkında derinlemesine açıklamalar içermektedir. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## Subgraph'ınızı Arbitrum'a nasıl transfer edebilirsiniz (Katman2) From 30fdea272ce63799f817832e34ce1d4d39a8e62f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:19 -0500 Subject: [PATCH 0317/1534] New translations l2-transfer-tools-guide.mdx (Chinese Simplified) --- .../arbitrum/l2-transfer-tools-guide.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-guide.mdx index a8f4863fc85f..da4756a834dd 100644 --- a/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/zh/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -1,20 +1,20 @@ --- -title: L2转移工具指南 +title: L2 Transfer Tools Guide --- The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. -关于这些工具的一些常见问题在 L2 Transfer Tools FAQ(/archived/arbitrum/l2-transfer-tools-faq/) 中有详细解答。FAQ 中深入解释了如何使用这些工具、它们的工作原理以及在使用过程中需要注意的事项。 +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). 
The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. -## 如何将你的子图转移到 Arbitrum(L2) +## 如何将你的子图转移到 Arbitrum (L2) ## 将子图转移到 Arbitrum 的好处 -过去一年里,Graph社区和核心开发人员一直在为迁移到 Arbitrum 做准备(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) 。Arbitrum 是一种二层网络或“L2”区块链,继承了以太坊的安全性,但提供了大幅降低的燃气费用。 +过去一年里,Graph社区和核心开发人员一直在为迁移到 Arbitrum [做准备](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) 。Arbitrum 是一种二层网络或“L2”区块链,继承了以太坊的安全性,但提供了大幅降低的燃气费用。 -当您将子图发布或升级到The Graph Network时,您将与协议上的智能合约进行交互,这需要使用以太币(ETH)支付燃气费用。通过将您的子图迁移到Arbitrum,将来对您的子图进行的任何更新将需要更低的燃气费用。较低的费用以及L2网络上平滑的曲线,使其他策展人更容易在您的子图上进行策展,从而增加了在您的子图上的索引人的奖励。这种较低成本的环境还使得索引器更便宜地对您的子图进行索引和服务。在接下来的几个月里,Arbitrum上的索引奖励将增加,而以太坊主网上的索引奖励将减少,因此越来越多的索引器将会将他们的质押迁移到L2网络并在该网络上设置运营。 +当您将子图发布或升级到Graph网络时,您将与协议上的智能合约进行交互,这需要使用以太币(ETH)支付燃气费用。通过将您的子图迁移到Arbitrum,将来对您的子图进行的任何更新将需要更低的燃气费用。较低的费用以及L2网络上平滑的曲线,使其他策展人更容易在您的子图上进行策展,从而增加了在您的子图上的索引人的奖励。这种较低成本的环境还使得索引人更便宜地对您的子图进行索引和服务。在接下来的几个月里,Arbitrum上的索引奖励将增加,而以太坊主网上的索引奖励将减少,因此越来越多的索引器将会将他们的质押迁移到L2网络并在该网络上设置运营。 ## 理解信号、你的 L1 子图和查询 URL 的变化 @@ -24,7 +24,7 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part 其他策展人可以选择是否提取他们所占份额的 GRT,或者将其转移到 L2 上的同一子图上,以铸造新的策展信号。如果一个子图所有者不将他们的子图转移到 L2 并通过合约调用手动废弃它,那么策展人将收到通知并可以提取他们的策展。 -一旦子图转移完成,由于所有策展都转换为 GRT,索引人将不再因索引子图而获得奖励。但是,有些索引人会保持对转移的子图进行 24 小时的服务,并立即开始在 L2 上进行子图索引。由于这些索引人已经对子图进行了索引,所以无需等待子图同步,几乎可以立即查询 L2 子图。 +一旦子图转移完成,由于所有策展都转换为 GRT,索引器将不再因索引子图而获得奖励。但是,有些索引器会保持对转移的子图进行 24 小时的服务,并立即开始在 L2 上进行子图索引。由于这些索引人已经对子图进行了索引,所以无需等待子图同步,几乎可以立即查询 L2 子图。 对 L2 子图的查询需要使用不同的 URL(on `arbitrum-gateway.thegraph.com`),但 L1 URL 将继续工作至少 48 小时。之后,L1 网关将把查询转发到 L2 网关(一段时间内),但这会增加延迟,因此建议尽快将所有查询切换到新的 URL。 @@ -52,7 +52,7 @@ The Graph has made it easy to move to L2 on Arbitrum One. For each protocol part ![transfer tool](/img/L2-transfer-tool1.png) -如果你使用拥有子图的钱包连接到 Explorer,你还可以在 Explorer 上的子图页面上找到它: +如果你使用拥有子图的钱包连接到浏览器,你还可以在浏览器上的子图页面上找到它: ![Transferring to L2](/img/transferToL2.png) @@ -130,13 +130,13 @@ The Graph has made it easy to move to L2 on Arbitrum One. 
For each protocol part 在开始转移之前,您必须决定哪个地址将在L2上拥有策展信号(参见上文的“选择您的L2钱包”),并建议您当需要在L2上重试消息执行时,提前转移一些用于手续费的 ETH到Arbitrum 上。您可以在某些交易所购买 ETH,并直接提款到 Arbitrum,或者您可以使用 Arbitrum 跨链桥将 ETH 从主网钱包发送到 L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - 由于 Arbitrum 上的燃料费用非常低,您可能只需要一小笔金额,例如 0.01 ETH 应该足够了。 -如果您策展的子图已经转移到L2,您将在Exploer上看到一条消息,告诉您正在为转移的子图进行策展。 +如果您策展的子图已经转移到L2,您将在浏览器上看到一条消息,告诉您正在为转移的子图进行策展。 在查看子图页面时,您可以选择撤回或转移策展。点击“将信号转移到 Arbitrum”将打开转移工具。 ![Transfer signal](/img/transferSignalL2TransferTools.png) -打开转移工具后,如果您没有任何ETH,可能需要向您的钱包添加一些ETH。然后,您将能够在“Receiving wallet address”字段中输入L2钱包地址 - 请确保在此处输入正确的地址。点击转移信号将提示您在钱包上执行交易(注意,包含了一些ETH用于支付L2的燃料费);这将启动转移过程。 +打开转移工具后,如果您没有任何ETH,可能需要向您的钱包添加一些ETH。然后,您将能够在“Receiving wallet address”字段中输入L2钱包地址 - 请确保在此处输入正确的地址。点击转移信号将提示您在钱包上执行交易(注意,包含了一些ETH用于支付L2的燃气费);这将启动转移过程。 如果执行了此步骤,请确保在不到7天的时间内继续完成第3步,否则您的信号GRT将丢失。这是由于在Arbitrum上的L1-L2消息传递的工作方式:通过跨链桥发送的消息是“可重试的票据”,必须在7天内执行。如果Arbitrum的燃气价格出现波动,初始执行可能需要重试。 From bf9abd40310765769fe60c9590f37aba7f5ad4d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:20 -0500 Subject: [PATCH 0318/1534] New translations l2-transfer-tools-guide.mdx (Urdu (Pakistan)) --- .../src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx index a4d2d0848d9c..2099dcb22749 100644 --- a/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/ur/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: L2 ٹرانسفر ٹولز گائڈ گراف نے Arbitrum One پر L2 پر منتقل ہونا آسان کر دیا ہے۔ پروٹوکول کے شریک کے لیے، تمام نیٹ ورک کے شرکاء کے لیے L2 میں منتقلی کو ہموار بنانے کے لیے L2 ٹرانسفر ٹولز کا ایک سیٹ موجود ہے۔ یہ ٹولز آپ سے اس بات پر منحصر ہوں گے کہ آپ کیا منتقل کر رہے ہیں۔ -ان ٹولز کے بارے میں اکثر پوچھے جانے والے سوالات کا جواب [L2 ٹرانسفر ٹولز اکثر پوچھے گئے سوالات](/archived/arbitrum/l2-transfer-tools-faq/). اکثر پوچھے جانے والے سوالات میں ٹولز کو استعمال کرنے کے طریقے، وہ کیسے کام کرتے ہیں، اور ان کو استعمال کرتے وقت ذہن میں رکھنے والی چیزوں کی گہرائی سے وضاحت پر مشتمل ہے۔ +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## اپنے سب گراف کو Arbitrum (L2) میں کیسے منتقل کریں From 568bd4cc66a4fc87389c2edbd9c34246ee55bd95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:21 -0500 Subject: [PATCH 0319/1534] New translations l2-transfer-tools-guide.mdx (Vietnamese) --- .../src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx index fc7891919b3e..78ec8c82a911 100644 --- a/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/vi/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: Hướng dẫn sử dụng công cụ chuyển L2 The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. 
These tools will require you to follow a specific set of steps depending on what you are transferring. -Một số câu hỏi thường gặp về những công cụ này được trả lời trong [Câu hỏi thường gặp về Công cụ chuyển L2](/archived/arbitrum/l2-transfer-tools-faq/). Câu hỏi thường gặp chứa các giải thích sâu sắc về cách sử dụng các công cụ, cách chúng hoạt động và những điều cần lưu ý khi sử dụng chúng. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## How to transfer your subgraph to Arbitrum (L2) From 5e1d01c321ad77271c9c0a1411c6c2f8db97fc8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:22 -0500 Subject: [PATCH 0320/1534] New translations l2-transfer-tools-guide.mdx (Marathi) --- .../src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx index 8ee89c0dd5b8..cb0215fe9cd0 100644 --- a/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/mr/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: L2 Transfer Tools Guide ग्राफने Arbitrum One वर L2 वर जाणे सोपे केले आहे. प्रत्येक प्रोटोकॉल सहभागीसाठी, सर्व नेटवर्क सहभागींसाठी L2 मध्ये हस्तांतरण अखंडपणे करण्यासाठी L2 हस्तांतरण साधनांचा संच आहे. तुम्ही काय हस्तांतरित करत आहात त्यानुसार या साधनांसाठी तुम्हाला चरणांच्या विशिष्ट संचाचे अनुसरण करणे आवश्यक आहे. -या साधनांबद्दलच्या काही वारंवार प्रश्नांची उत्तरे [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/) मध्ये दिली आहेत. FAQ मध्ये साधने कशी वापरायची, ते कसे कार्य करतात आणि ते वापरताना लक्षात ठेवण्यासारख्या गोष्टींचे सखोल स्पष्टीकरण असते. +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## तुमचा सबग्राफ आर्बिट्रम (L2) वर कसा हस्तांतरित करायचा From 5eb37d415d227882c5a922d772aeecf8a689f8c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:23 -0500 Subject: [PATCH 0321/1534] New translations l2-transfer-tools-guide.mdx (Hindi) --- .../src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx index 1a29f49d8527..22cea8b3617f 100644 --- a/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/src/pages/hi/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -4,7 +4,7 @@ title: L2 Transfer Tools Guide The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/). 
The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +इन टूल्स के बारे में कुछ सामान्य प्रश्नों के उत्तर [L2 Transfer Tools FAQ](/archived/arbitrum/l2-transfer-tools-faq/) में दिए गए हैं। FAQs में इन टूल्स का उपयोग कैसे करें, वे कैसे काम करते हैं, और उनका उपयोग करते समय ध्यान में रखने वाली बातें विस्तृत रूप से समझाई गई हैं। ## अपने सबग्राफ को आर्बिट्रम (L2) में कैसे स्थानांतरित करें From bc8069fdf439f86c2b9168f1ac73abe1883bc701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:24 -0500 Subject: [PATCH 0322/1534] New translations sunrise.mdx (Romanian) --- website/src/pages/ro/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ro/archived/sunrise.mdx b/website/src/pages/ro/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/ro/archived/sunrise.mdx +++ b/website/src/pages/ro/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From bd556d88a6e2bae13e5a07867032dfeaa3b6437c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:25 -0500 Subject: [PATCH 0323/1534] New translations sunrise.mdx (French) --- website/src/pages/fr/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/fr/archived/sunrise.mdx b/website/src/pages/fr/archived/sunrise.mdx index 69849570e085..575d138c0f55 100644 --- a/website/src/pages/fr/archived/sunrise.mdx +++ b/website/src/pages/fr/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From 4c2fc91aa651912748d4c43e8d909cc4ea734102 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:26 -0500 Subject: [PATCH 0324/1534] New translations sunrise.mdx (Spanish) --- website/src/pages/es/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/es/archived/sunrise.mdx b/website/src/pages/es/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/es/archived/sunrise.mdx +++ b/website/src/pages/es/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From 2a8e13ae53d2bf1ba8ac5f7ad7672a1f1dba7c48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:27 -0500 Subject: [PATCH 0325/1534] New translations sunrise.mdx (Arabic) --- website/src/pages/ar/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ar/archived/sunrise.mdx b/website/src/pages/ar/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/ar/archived/sunrise.mdx +++ b/website/src/pages/ar/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. 
From 8e516086c383bce38dd6874a1e10728ae58381ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:28 -0500 Subject: [PATCH 0326/1534] New translations sunrise.mdx (Czech) --- website/src/pages/cs/archived/sunrise.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/cs/archived/sunrise.mdx b/website/src/pages/cs/archived/sunrise.mdx index c1fd503ed6f4..71b86ac159ff 100644 --- a/website/src/pages/cs/archived/sunrise.mdx +++ b/website/src/pages/cs/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Časté dotazy po východu slunce + aktualizace na síť Graf +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Poznámka: Východ slunce decentralizovaných dat skončil 12. června 2024. @@ -33,7 +34,7 @@ Upgrade Indexer byl implementován za účelem zlepšení zkušeností s upgrade ### Co dělá upgrade Indexer? - Zavádí řetězce, které ještě nezískaly odměnu za indexaci v síti Graf, a zajišťuje, aby byl po zveřejnění podgrafu co nejrychleji k dispozici indexátor pro obsluhu dotazů. -- Podporuje řetězce, které byly dříve dostupné pouze v hostované službě. Úplný seznam podporovaných řetězců najdete [zde](/supported-networks/). +- It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). - Indexátoři, kteří provozují upgrade indexátoru, tak činí jako veřejnou službu pro podporu nových podgrafů a dalších řetězců, kterým chybí indexační odměny, než je Rada grafů schválí. ### Proč Edge & Node spouští aktualizaci Indexer? From 07c50d51466bb2bea84677a8210e3b81f36899fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:29 -0500 Subject: [PATCH 0327/1534] New translations sunrise.mdx (German) --- website/src/pages/de/archived/sunrise.mdx | 81 ++++++++++++----------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/website/src/pages/de/archived/sunrise.mdx b/website/src/pages/de/archived/sunrise.mdx index f9419c36d642..398fe1ca72f7 100644 --- a/website/src/pages/de/archived/sunrise.mdx +++ b/website/src/pages/de/archived/sunrise.mdx @@ -1,79 +1,80 @@ --- -title: Post-Sunrise + Upgrading to The Graph Network FAQ +title: Post-Sunrise + Upgrade auf The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- -> Note: The Sunrise of Decentralized Data ended June 12th, 2024. +> Hinweis: Die Sunrise der dezentralisierten Daten endete am 12. Juni 2024. -## What was the Sunrise of Decentralized Data? +## Was war die Sunrise der dezentralisierten Daten? -The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. +Die Sunrise of Decentralized Data war eine Initiative, die von Edge & Node angeführt wurde. Diese Initiative ermöglichte es Subgraph-Entwicklern, nahtlos auf das dezentrale Netzwerk von The Graph zu wechseln. -This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. +Dieser Plan stützt sich auf frühere Entwicklungen des Graph-Ökosystems, einschließlich eines aktualisierten Indexers, der Abfragen auf neu veröffentlichte Subgraphen ermöglicht. -### What happened to the hosted service? +### Was ist mit dem gehosteten Dienst passiert? -The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. 
+Die Query-Endpunkte des gehosteten Dienstes sind nicht mehr verfügbar, und Entwickler können keine neuen Subgraphen für den gehosteten Dienst bereitstellen. -During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. +Während des Upgrade-Prozesses konnten die Besitzer von gehosteten Service-Subgraphen ihre Subgraphen auf The Graph Network aktualisieren. Außerdem konnten Entwickler automatisch aktualisierte Subgraphen beanspruchen. -### Was Subgraph Studio impacted by this upgrade? +### Wurde Subgraph Studio durch dieses Upgrade beeinträchtigt? -No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. +Nein, Subgraph Studio wurde durch Sunrise nicht beeinträchtigt. Subgraphen standen sofort für Abfragen zur Verfügung, angetrieben durch den Upgrade Indexer, der die gleiche Infrastruktur wie der gehostete Dienst nutzt. -### Why were subgraphs published to Arbitrum, did it start indexing a different network? +### Warum wurden Subgraphen auf Arbitrum veröffentlicht, hat es begonnen, ein anderes Netzwerk zu indizieren? -The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) +Das Graph Network wurde zunächst auf dem Ethereum Mainnet eingesetzt, wurde aber später auf Arbitrum One verschoben, um die Gaskosten für alle Nutzer zu senken. Infolgedessen werden alle neuen Subgraphen im Graph Network auf Arbitrum veröffentlicht, damit Indexer sie unterstützen können. Arbitrum ist das Netzwerk, in dem Subgraphen veröffentlicht werden, aber Subgraphen können jedes der [unterstützten Netzwerke](/supported-networks/) indizieren. -## About the Upgrade Indexer +## Über den Upgrade Indexer -> The upgrade Indexer is currently active. +> Der Upgrade Indexer ist derzeit aktiv. -The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. +Der Upgrade Indexer wurde implementiert, um das Upgrade von Subgraphen vom gehosteten Dienst zu The Graph Network zu verbessern und neue Versionen von bestehenden Subgraphen, die noch nicht indiziert wurden, zu unterstützen. -### What does the upgrade Indexer do? +### Was macht der Upgrade Indexer? -- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. -- It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). -- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. 
+- Er bootet Ketten, die noch keinen Reward für die Indizierung im The Graph Network erhalten haben, und stellt sicher, dass ein Indexer zur Verfügung steht, um Anfragen so schnell wie möglich zu bedienen, nachdem ein Subgraph veröffentlicht wurde. +- Er unterstützt Ketten, die bisher nur über den gehosteten Dienst verfügbar waren. Eine umfassende Liste der unterstützten Ketten finden Sie [hier](/supported-networks/). +- Indexer, die einen Upgrade Indexer betreiben, tun dies als öffentlichen Dienst, um neue Subgraphen und zusätzliche Ketten zu unterstützen, für die es noch keine Rewards gibt, bevor sie vom The Graph Council genehmigt werden. -### Why is Edge & Node running the upgrade Indexer? +### Warum führen Edge & Node den Upgrade Indexer aus? -Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. +Edge & Node haben in der Vergangenheit den gehosteten Dienst gewartet und verfügen daher bereits über synchronisierte Daten für Subgraphen des gehosteten Dienstes. -### What does the upgrade indexer mean for existing Indexers? +### Was bedeutet der Upgrade Indexer für bestehende Indexer? -Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. +Ketten, die bisher nur vom gehosteten Dienst unterstützt wurden, wurden den Entwicklern auf The Graph Network zunächst ohne Rewards zur Verfügung gestellt. -However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. +Durch diese Aktion wurden jedoch Abfragegebühren für jeden interessierten Indexer freigeschaltet und die Anzahl der im Graph Network veröffentlichten Subgraphen erhöht. Infolgedessen haben Indexer mehr Möglichkeiten, diese Subgraphen im Austausch gegen Abfragegebühren zu indizieren und zu bedienen, noch bevor Indexing Rewards für eine Kette aktiviert sind. -The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. +Der Upgrade-Indexer versorgt die Indexer-Community auch mit Informationen über die potenzielle Nachfrage nach Subgraphen und neuen Ketten im The Graph Network. -### What does this mean for Delegators? +### Was bedeutet das für die Delegatoren? -The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. +Der Upgrade-Indexer bietet eine große Chance für Delegatoren. Da mehr Subgraphen vom gehosteten Dienst auf The Graph Network umgestellt werden können, profitieren die Delegatoren von der erhöhten Netzwerkaktivität. -### Did the upgrade Indexer compete with existing Indexers for rewards? +### Konkurriert der aktualisierte Indexer mit bestehenden Indexern um Rewards? -No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. +Nein, der Upgrade-Indexer weist nur den Mindestbetrag pro Subgraph zu und sammelt keine Rewards für die Indizierung. -It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. 
+Er arbeitet "nach Bedarf" und dient als Ausweichlösung, bis mindestens drei andere Indexer im Netz für die jeweiligen Ketten und Subgraphen eine ausreichende Dienstqualität erreicht haben. -### How does this affect subgraph developers? +### Wie wirkt sich das für die Entwickler von Subgraphen aus? -Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. +Entwickler von Subgraphen können ihre Subgraphen auf The Graph Network fast sofort nach dem Upgrade vom gehosteten Dienst oder nach dem [Veröffentlichen aus Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/) abfragen, da keine Vorlaufzeit für die Indizierung erforderlich war. Bitte beachten Sie, dass das [Erstellen eines Subgraphen](/developing/creating-a-subgraph/) von diesem Upgrade nicht betroffen ist. -### How does the upgrade Indexer benefit data consumers? +### Welchen Nutzen hat der Upgrade-Indexer für die Datenkonsumenten? -The upgrade Indexer enables chains on the network that were previously only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. +Der Upgrade-Indexer ermöglicht Verkettungen im Netz, die bisher nur vom gehosteten Dienst unterstützt wurden. Daher erweitert er den Umfang und die Verfügbarkeit von Daten, die im Netzwerk abgefragt werden können. -### How does the upgrade Indexer price queries? +### Wie bewertet der Upgrade-Indexer Abfragen? -The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. +Der Upgrade-Indexer berechnet Abfragen zum Marktpreis, um den Markt für Abfragegebühren nicht zu beeinflussen. -### When will the upgrade Indexer stop supporting a subgraph? +### Wann wird der Upgrade-Indexer aufhören, einen Subgraphen zu unterstützen? -The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. +Der Upgrade-Indexer unterstützt einen Subgraphen so lange, bis mindestens 3 andere Indexer erfolgreich und konsistent die an ihn gerichteten Abfragen bedienen. -Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. +Außerdem unterstützt der Upgrade-Indexer einen Subgraphen nicht mehr, wenn er in den letzten 30 Tagen nicht abgefragt wurde. -Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. +Für andere Indexer besteht ein Anreiz, Subgraphen mit laufendem Abfragevolumen zu unterstützen. Das Anfragevolumen an den Upgrade-Indexer sollte gegen Null tendieren, da er eine kleine Zuweisungsgröße hat, und andere Indexer sollten für Anfragen vor ihm ausgewählt werden. 
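Across these sunrise.mdx translations, the upgrade Indexer's fallback behavior reduces to two documented checks: it keeps serving a subgraph only while fewer than three other Indexers successfully and consistently serve queries to it, and it drops support once the subgraph has gone 30 days without a query. The TypeScript sketch below restates that rule for illustration; the type, field, and function names are hypothetical, not The Graph's actual implementation.

```typescript
// Minimal sketch of the fallback rule described in the sunrise.mdx FAQ above.
// Only the two documented thresholds (3 healthy Indexers, 30 idle days) come
// from the source; everything else here is a hypothetical illustration.

interface SubgraphServingStats {
  /** Other Indexers consistently and successfully serving this subgraph. */
  healthyOtherIndexers: number;
  /** Timestamp (ms) of the most recent query made to the subgraph. */
  lastQueriedAt: number;
}

const MIN_OTHER_INDEXERS = 3;
const MAX_IDLE_MS = 30 * 24 * 60 * 60 * 1000; // 30 days

/** True while the upgrade Indexer should keep serving as a fallback. */
function shouldKeepSupporting(
  stats: SubgraphServingStats,
  now: number = Date.now(),
): boolean {
  const enoughCoverage = stats.healthyOtherIndexers >= MIN_OTHER_INDEXERS;
  const recentlyQueried = now - stats.lastQueriedAt <= MAX_IDLE_MS;
  return !enoughCoverage && recentlyQueried;
}

// Example: two other Indexers and a query five days ago -> keep supporting.
console.log(
  shouldKeepSupporting({
    healthyOtherIndexers: 2,
    lastQueriedAt: Date.now() - 5 * 24 * 60 * 60 * 1000,
  }),
); // true
```

Combining the two conditions this way matches the "as needed" framing in the FAQ: the upgrade Indexer acts as a temporary backstop rather than a competitor for rewards.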
From 9af3923b8203a8be2167f0e996eb783d01d1cbb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:30 -0500 Subject: [PATCH 0328/1534] New translations sunrise.mdx (Italian) --- website/src/pages/it/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/it/archived/sunrise.mdx b/website/src/pages/it/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/it/archived/sunrise.mdx +++ b/website/src/pages/it/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From c6c24bf9db141a6cd5b979e395a2a9e847c355bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:31 -0500 Subject: [PATCH 0329/1534] New translations sunrise.mdx (Japanese) --- website/src/pages/ja/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ja/archived/sunrise.mdx b/website/src/pages/ja/archived/sunrise.mdx index 0ffb746a1721..eac51559a724 100644 --- a/website/src/pages/ja/archived/sunrise.mdx +++ b/website/src/pages/ja/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From c54a051b9d803bd5544b037bfd67e377ca948bb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:32 -0500 Subject: [PATCH 0330/1534] New translations sunrise.mdx (Korean) --- website/src/pages/ko/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ko/archived/sunrise.mdx b/website/src/pages/ko/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/ko/archived/sunrise.mdx +++ b/website/src/pages/ko/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From 8014a0d5f4a5d07e104b320e3501ccfe42270271 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:33 -0500 Subject: [PATCH 0331/1534] New translations sunrise.mdx (Dutch) --- website/src/pages/nl/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/nl/archived/sunrise.mdx b/website/src/pages/nl/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/nl/archived/sunrise.mdx +++ b/website/src/pages/nl/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. 
From 5724f746b844d387f5de35603857ce313b7c3ea5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:34 -0500 Subject: [PATCH 0332/1534] New translations sunrise.mdx (Polish) --- website/src/pages/pl/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/pl/archived/sunrise.mdx b/website/src/pages/pl/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/pl/archived/sunrise.mdx +++ b/website/src/pages/pl/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From 32480cb5521580c7a5175f9e237c93ca398b2d97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:35 -0500 Subject: [PATCH 0333/1534] New translations sunrise.mdx (Portuguese) --- website/src/pages/pt/archived/sunrise.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pt/archived/sunrise.mdx b/website/src/pages/pt/archived/sunrise.mdx index 85a7eefe2397..f7e7a0faf5f5 100644 --- a/website/src/pages/pt/archived/sunrise.mdx +++ b/website/src/pages/pt/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Perguntas Frequentes sobre o Pós-Nascer do Sol + Atualizações para a Graph Network +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Nota: O Nascer do Sol dos Dados Descentralizados foi encerrado em 12 de junho de 2024. @@ -22,7 +23,7 @@ Não, o Subgraph Studio não foi impactado pelo Nascer do Sol. Os subgraphs esta ### Por que subgraphs eram publicados ao Arbitrum, eles começaram a indexar uma rede diferente? -A Graph Network foi lançada inicialmente à mainnet do Ethereum, mas migrou ao Arbitrum One apra reduzir custos de gas para todos os utilizadores. Assim, todos os subgraphs novos são publicados à Graph Network no Arbitrum para que possam ser apoiados por Indexadores. O Arbitrum é a rede em qual os subgraphs são publicados, mas subgraphs podem indexar quaisquer das [redes apoiadas](/supported-networks/) +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) ## Sobre o Indexador de Atualização @@ -33,7 +34,7 @@ O Indexador de atualização foi construído para melhorar a experiência de atu ### O que o Indexador de atualização faz? - Ele inicializa chains que ainda não tenham recompensas de indexação na Graph Network, e garante que um Indexador esteja disponível para servir queries o mais rápido possível após a publicação de um subgraph. -- Ele apoia chains que antigamente só estavam disponíveis no serviço hospedado. Veja uma lista compreensiva de chains apoiadas [aqui](/supported-networks/). +- It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). - Indexadores que operam um Indexador de atualização o fazem como um serviço público, para apoiar novos subgraphs e chains adicionais que não tenham recompensas de indexação antes da aprovação do Graph Council. ### Porque a Edge & Node executa o Indexador de atualização? 
@@ -60,7 +61,7 @@ Ele opera numa base de "necessidade" e serve como uma reserva até que uma cota ### Como isto afeta os programadores de subgraph? -Progmadores de subgraph poderão enviar queries para os seus subgraphs na Graph Network quase imediatamente após migrar do serviço hospedado ou [publicá-los do Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), já que não será necessário tempo de espera para a indexação. Note que a [criação de subgraphs](/developing/creating-a-subgraph/) não foi impactada por esta atualização. +Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. ### Como o Indexador de atualizações beneficia consumidores de dados? From 4512bd2f6fa495f6a07c6f86763a2c783c6355ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:36 -0500 Subject: [PATCH 0334/1534] New translations sunrise.mdx (Russian) --- website/src/pages/ru/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ru/archived/sunrise.mdx b/website/src/pages/ru/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/ru/archived/sunrise.mdx +++ b/website/src/pages/ru/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From c4ef0a228bc847ca95e52b33187588f4b48618b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:39 -0500 Subject: [PATCH 0335/1534] New translations sunrise.mdx (Swedish) --- website/src/pages/sv/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/sv/archived/sunrise.mdx b/website/src/pages/sv/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/sv/archived/sunrise.mdx +++ b/website/src/pages/sv/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From 4bb0f7bec0c1aab2cd0a6ad8a1ab53b3ee2255cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:40 -0500 Subject: [PATCH 0336/1534] New translations sunrise.mdx (Turkish) --- website/src/pages/tr/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/tr/archived/sunrise.mdx b/website/src/pages/tr/archived/sunrise.mdx index 91c6e09fd7c6..f7d204bb791f 100644 --- a/website/src/pages/tr/archived/sunrise.mdx +++ b/website/src/pages/tr/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. 
From 278004b4abe677459e551e20336391811624e781 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:41 -0500 Subject: [PATCH 0337/1534] New translations sunrise.mdx (Ukrainian) --- website/src/pages/uk/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/uk/archived/sunrise.mdx b/website/src/pages/uk/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/uk/archived/sunrise.mdx +++ b/website/src/pages/uk/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From 49118be23bbadae34d34070956e87bcb0842747a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:41 -0500 Subject: [PATCH 0338/1534] New translations sunrise.mdx (Chinese Simplified) --- website/src/pages/zh/archived/sunrise.mdx | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/src/pages/zh/archived/sunrise.mdx b/website/src/pages/zh/archived/sunrise.mdx index 0e7fafa344d0..a768ee33d016 100644 --- a/website/src/pages/zh/archived/sunrise.mdx +++ b/website/src/pages/zh/archived/sunrise.mdx @@ -1,10 +1,11 @@ --- -title: Post-Sunrise + Upgrading to The Graph Network FAQ +title: 黎明后+升级到Graph网络常见问题 +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. -## What was the Sunrise of Decentralized Data? +## 去中心化数据的黎明是什么? The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. @@ -36,7 +37,7 @@ The upgrade Indexer was implemented to improve the experience of upgrading subgr - It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). - Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. -### 为什么 Edge & Node 运行升级索引人? +### 为什么 Edge & Node 运行升级索引器? Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. @@ -48,7 +49,7 @@ However, this action unlocked query fees for any interested Indexer and increase The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. -### 这对于委托人来说意味着什么? +### What does this mean for Delegators? The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. 
From 414d2ddf41c4a3943252483d46055ef5df2b1a44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:42 -0500 Subject: [PATCH 0339/1534] New translations sunrise.mdx (Urdu (Pakistan)) --- website/src/pages/ur/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ur/archived/sunrise.mdx b/website/src/pages/ur/archived/sunrise.mdx index 9591fa169909..b1ad2e6523a3 100644 --- a/website/src/pages/ur/archived/sunrise.mdx +++ b/website/src/pages/ur/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From e8653bc0ef0200319798f0f61231d6e027090505 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:43 -0500 Subject: [PATCH 0340/1534] New translations sunrise.mdx (Vietnamese) --- website/src/pages/vi/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/vi/archived/sunrise.mdx b/website/src/pages/vi/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/vi/archived/sunrise.mdx +++ b/website/src/pages/vi/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From d77dd5c3285d95618dbbe75203a589c85161b8b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:44 -0500 Subject: [PATCH 0341/1534] New translations sunrise.mdx (Marathi) --- website/src/pages/mr/archived/sunrise.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/mr/archived/sunrise.mdx b/website/src/pages/mr/archived/sunrise.mdx index f9419c36d642..eb18a93c506c 100644 --- a/website/src/pages/mr/archived/sunrise.mdx +++ b/website/src/pages/mr/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: Post-Sunrise + Upgrading to The Graph Network FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > Note: The Sunrise of Decentralized Data ended June 12th, 2024. From 8f8c72bd85e2ea47d2186ec74b626b08abb5da9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:45 -0500 Subject: [PATCH 0342/1534] New translations sunrise.mdx (Hindi) --- website/src/pages/hi/archived/sunrise.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/hi/archived/sunrise.mdx b/website/src/pages/hi/archived/sunrise.mdx index 0ac3b30cc40e..64396d2fb998 100644 --- a/website/src/pages/hi/archived/sunrise.mdx +++ b/website/src/pages/hi/archived/sunrise.mdx @@ -1,5 +1,6 @@ --- title: पोस्ट-सनराइज + The Graph Network में अपग्रेडिंग FAQ +sidebarTitle: Post-Sunrise Upgrade FAQ --- > नोट: Decentralized Data का Sunrise 12 जून, 2024 को समाप्त हो गया। @@ -22,7 +23,7 @@ title: पोस्ट-सनराइज + The Graph Network में अप ### सबग्राफ्स को Arbitrum पर क्यों प्रकाशित किया गया, क्या इसने एक अलग नेटवर्क को इंडेक्स करना शुरू किया? 
-The Graph Network को शुरू में Ethereum मेननेट पर तैनात किया गया था, लेकिन बाद में सभी उपयोगकर्ताओं के लिए गैस लागत को कम करने के लिए इसे Arbitrum वन पर स्थानांतरित कर दिया गया। परिणामस्वरूप, सभी नए सबग्राफ़ Arbitrum पर The Graph Network पर प्रकाशित किए जाते हैं ताकि Indexer उनका समर्थन कर सकें। Arbitrum वह नेटवर्क है जिस पर सबग्राफ़ प्रकाशित किए जाते हैं, लेकिन सबग्राफ़ किसी भी [समर्थित नेटवर्क](/supported-networks/) को इंडेक्स कर सकते हैं। +The Graph Network को पहले Ethereum mainnet पर डिप्लॉय किया गया था, लेकिन गैस लागत को कम करने के लिए इसे बाद में Arbitrum One पर स्थानांतरित कर दिया गया। परिणामस्वरूप, सभी नए सबग्राफ को Arbitrum पर The Graph Network में प्रकाशित किया जाता है ताकि Indexers उन्हें सपोर्ट कर सकें। Arbitrum वह नेटवर्क है जिस पर सबग्राफ को प्रकाशित किया जाता है, लेकिन सबग्राफ [supported networks](/supported-networks/) में से किसी पर भी index कर सकते हैं ## About the Upgrade Indexer @@ -33,7 +34,7 @@ The Graph Network को शुरू में Ethereum मेननेट प ### अपग्रेड Indexer क्या करता है? - यह उन चेन को बूटस्ट्रैप करता है जिन्हें अभी तक The Graph Network पर इंडेक्सिंग पुरस्कार नहीं मिले हैं और यह सुनिश्चित करता है कि एक Indexer उपलब्ध हो ताकि एक Subgraph प्रकाशित होने के तुरंत बाद क्वेरी को यथाशीघ्र सेवा दी जा सके। -- यह उन चेन का समर्थन करता है जो पहले केवल Hosted Service पर उपलब्ध थीं। समर्थित चेन की एक व्यापक सूची [यहां](https://thegraph. com/supported-networks/) देखें। +- यह उन chain को भी सपोर्ट करता है जो पहले केवल Hosted Service पर उपलब्ध थीं। सपोर्टेड chain की व्यापक सूची [यहां](/supported-networks/) देखें। - जो Indexer अपग्रेड इंडेक्सर का संचालन करते हैं, वे नए सबग्राफ़ और अतिरिक्त चेन का समर्थन करने के लिए एक सार्वजनिक सेवा के रूप में ऐसा करते हैं जो इंडेक्सिंग पुरस्कारों की कमी का सामना कर रहे हैं, जब तक कि The Graph काउंसिल उन्हें मंजूरी नहीं देती। ### Why is Edge & Node running the upgrade Indexer? @@ -60,7 +61,7 @@ Edge & Node ने ऐतिहासिक रूप से होस्टे ### यह Subgraph डेवलपर्स को कैसे प्रभावित करता है? -सबग्राफ डेवलपर्स अपने सबग्राफ को The Graph Network पर लगभग तुरंत ही अपग्रेड किए जाने के बाद या [Subgraph Studio से प्रकाशित करने](/subgraphs/developing/publishing/publishing-a-subgraph/) के बाद क्वेरी कर सकते हैं, क्योंकि indexing के लिए कोई लीड टाइम की आवश्यकता नहीं थी। कृपया ध्यान दें कि [सबग्राफ बनाना](/developing/creating-a-subgraph/) इस अपग्रेड से प्रभावित नहीं हुआ था। +सबग्राफ डेवलपर्स अपने सबग्राफ को The Graph Network पर लगभग तुरंत क्वेरी कर सकते हैं, जब वे होस्टेड सेवा से या Subgraph Studio()/subgraphs/developing/publishing/publishing-a-subgraph/ से प्रकाशित करते हैं, क्योंकि इंडेक्सिंग के लिए कोई लीड टाइम आवश्यक नहीं है। कृपया ध्यान दें कि सबग्राफ बनाना(/developing/creating-a-subgraph/) इस अपग्रेड से प्रभावित नहीं हुआ था। ### अपग्रेड Indexer डेटा उपभोक्ताओं को कैसे लाभ पहुंचाता है? From 052e01e464d768ba5f8917e5e829e2fad92ee506 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:47 -0500 Subject: [PATCH 0343/1534] New translations contracts.mdx (French) --- website/src/pages/fr/contracts.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/fr/contracts.mdx b/website/src/pages/fr/contracts.mdx index 99042adad4ad..7312609bed2d 100644 --- a/website/src/pages/fr/contracts.mdx +++ b/website/src/pages/fr/contracts.mdx @@ -14,7 +14,7 @@ Il s'agit du déploiement principal de The Graph Network. ## Réseau principal -Il s'agissait du déploiement initial de The Graph Network. [En savoir plus](/archived/arbitrum/arbitrum-faq/) sur la mise à l'échelle de The Graph avec Arbitrum. 
+This was the original deployment of The Graph Network. [Learn more](/archived/arbitrum/arbitrum-faq/) about The Graph's scaling with Arbitrum. From 9f70df1997ad035c2c656eba7bb00096cc3b9994 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:50 -0500 Subject: [PATCH 0344/1534] New translations contracts.mdx (German) --- website/src/pages/de/contracts.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/de/contracts.mdx b/website/src/pages/de/contracts.mdx index 3938844149c1..21838ada7022 100644 --- a/website/src/pages/de/contracts.mdx +++ b/website/src/pages/de/contracts.mdx @@ -1,26 +1,26 @@ --- -title: Protocol Contracts +title: Protokoll-Verträge --- import { ProtocolContractsTable } from '@/contracts' -Below are the deployed contracts which power The Graph Network. Visit the official [contracts repository](https://github.com/graphprotocol/contracts) to learn more. +Nachstehend finden Sie die bereitgestellten Verträge, die das The Graph Network antreiben. Besuchen Sie das offizielle [Vertragsrepository] (https://github.com/graphprotocol/contracts), um mehr zu erfahren. ## Arbitrum -This is the principal deployment of The Graph Network. +Dies ist der Haupteinsatzbereich von The Graph Network. ## Mainnet -This was the original deployment of The Graph Network. [Learn more](/archived/arbitrum/arbitrum-faq/) about The Graph's scaling with Arbitrum. +Dies war die ursprüngliche Bereitstellung von The Graph Network. [Erfahren Sie mehr](/archived/arbitrum/arbitrum-faq/) über die Skalierung von The Graph mit Arbitrum. ## Arbitrum Sepolia -This is the primary testnet for The Graph Network. Testnet is predominantly used by core developers and ecosystem participants for testing purposes. There are no guarantees of service or availability on The Graph's testnets. +Dies ist das primäre Testnetz für The Graph Network. Das Testnet wird vor allem von Kernentwicklern und Teilnehmern des Ökosystems zu Testzwecken genutzt. Es gibt keine Garantien für den Service oder die Verfügbarkeit der Testnetze von The Graph. From e6e23886496e3ba590b9436f376320d98040becd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:53 -0500 Subject: [PATCH 0345/1534] New translations contracts.mdx (Portuguese) --- website/src/pages/pt/contracts.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/contracts.mdx b/website/src/pages/pt/contracts.mdx index 7e8cfa69be60..6940cabc6b5b 100644 --- a/website/src/pages/pt/contracts.mdx +++ b/website/src/pages/pt/contracts.mdx @@ -14,7 +14,7 @@ Este é o lançamento principal da Graph Network. ## Mainnet -Este foi o lançamento original da Graph Network. [Aprenda mais](/archived/arbitrum/arbitrum-faq/) sobre o escalamento do The Graph com Arbitrum. +This was the original deployment of The Graph Network. [Learn more](/archived/arbitrum/arbitrum-faq/) about The Graph's scaling with Arbitrum. 
From 61d35333a95369da75dcd93f181a6a3352ac9b3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:58 -0500 Subject: [PATCH 0346/1534] New translations contracts.mdx (Hindi) --- website/src/pages/hi/contracts.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/hi/contracts.mdx b/website/src/pages/hi/contracts.mdx index 3938844149c1..0a57ae81839b 100644 --- a/website/src/pages/hi/contracts.mdx +++ b/website/src/pages/hi/contracts.mdx @@ -14,7 +14,7 @@ This is the principal deployment of The Graph Network. ## Mainnet -This was the original deployment of The Graph Network. [Learn more](/archived/arbitrum/arbitrum-faq/) about The Graph's scaling with Arbitrum. +यह The Graph Network का मूल डिप्लॉयमेंट था। Learn more()/archived/arbitrum/arbitrum-faq/ Arbitrum के साथ The Graph के स्केलिंग के बारे में अधिक जानें। From 822e5b9ffb80066bddc19e0972b6cb643bb87a0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:49:59 -0500 Subject: [PATCH 0347/1534] New translations chain-integration-overview.mdx (French) --- .../src/pages/fr/indexing/chain-integration-overview.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/fr/indexing/chain-integration-overview.mdx b/website/src/pages/fr/indexing/chain-integration-overview.mdx index 7f31c5af51aa..4bbb83bdc4a9 100644 --- a/website/src/pages/fr/indexing/chain-integration-overview.mdx +++ b/website/src/pages/fr/indexing/chain-integration-overview.mdx @@ -6,12 +6,12 @@ Un processus d'intégration transparent et basé sur la gouvernance a été con ## Étape 1. Intégration technique -- Please visit [New Chain Integration](/indexing/new-chain-integration/) for information on `graph-node` support for new chains. +- Veuillez consulter [Intégration de nouvelle chaîne](/indexing/new-chain-integration/) pour plus d'informations sur le support `graph-node` pour les nouvelles chaînes. - Les équipes lancent le processus d'intégration du protocole en créant un fil de discussion sur le forum [ici](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (sous-catégorie Nouvelles sources de données sous Gouvernance et GIPs ). L'utilisation du modèle de forum par défaut est obligatoire. ## Étape 2. Validation de l'intégration -- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON-RPC, Firehose or Substreams endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Les équipes collaborent avec les développeurs principaux, Graph Foundation et les opérateurs de GUIs et de passerelles réseau, tels que [Subgraph Studio](https://thegraph.com/studio/), pour assurer un processus d'intégration fluide. Cela implique de fournir l'infrastructure back-end nécessaire, comme les points de terminaison JSON-RPC, Firehose ou Substreams de la chaîne d'intégration. Les équipes souhaitant éviter d'héberger elles-mêmes une telle infrastructure peuvent s'appuyer sur la communauté d'opérateurs de nœuds (indexeurs) de The Graph pour ce faire, avec l'aide de la Fondation. - Les Graph Indexeurs testent l'intégration sur le réseau de testnet du graph. 
- Les développeurs principaux et les indexeurs surveillent la stabilité, les performances et le déterminisme des données. @@ -38,7 +38,7 @@ Ce processus est lié au service de données Subgraph, applicable uniquement aux Cela n’aurait un impact que sur la prise en charge du protocole pour l’indexation des récompenses sur les subgraphs alimentés par Substreams. La nouvelle implémentation de Firehose nécessiterait des tests sur testnet, en suivant la méthodologie décrite pour l'étape 2 de ce GIP. De même, en supposant que l'implémentation soit performante et fiable, un PR sur la [Matrice de support des fonctionnalités](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) serait requis ( Fonctionnalité de sous-graphe « Sous-flux de sources de données »), ainsi qu'un nouveau GIP pour la prise en charge du protocole pour l'indexation des récompenses. N'importe qui peut créer le PR et le GIP ; la Fondation aiderait à obtenir l'approbation du Conseil. -### 3. How much time will the process of reaching full protocol support take? +### 3. Combien de temps faudra-t-il pour parvenir à la prise en charge complète du protocole ? Le temps nécessaire à la mise en réseau principal devrait être de plusieurs semaines, variant en fonction du temps de développement de l'intégration, de la nécessité ou non de recherches supplémentaires, de tests et de corrections de bugs et, comme toujours, du calendrier du processus de gouvernance qui nécessite les commentaires de la communauté. @@ -46,4 +46,4 @@ La prise en charge du protocole pour l'indexation des récompenses dépend de la ### 4. Comment les priorités seront-elles gérées ? -Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. +Comme pour le point #3, cela dépendra de l'état de préparation général et de la capacité des parties prenantes impliquées. Par exemple, une nouvelle chaîne avec une toute nouvelle implémentation de Firehose peut prendre plus de temps que des intégrations qui ont déjà été testées sur le terrain ou qui sont plus avancées dans le processus de gouvernance. From 978bb5903ca02951903d5ae3a5dbeeb60a733e0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:02 -0500 Subject: [PATCH 0348/1534] New translations chain-integration-overview.mdx (German) --- .../indexing/chain-integration-overview.mdx | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/website/src/pages/de/indexing/chain-integration-overview.mdx b/website/src/pages/de/indexing/chain-integration-overview.mdx index 77141e82b34a..5f867a52ca5b 100644 --- a/website/src/pages/de/indexing/chain-integration-overview.mdx +++ b/website/src/pages/de/indexing/chain-integration-overview.mdx @@ -1,49 +1,49 @@ --- -title: Chain Integration Process Overview +title: Übersicht über den Prozess der Kettenintegration --- -A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. 
+Für Blockchain-Teams, die eine [Integration mit dem The Graph-Protokoll] (https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468) anstreben, wurde ein transparenter und auf Governance basierender Integrationsprozess entwickelt. Es handelt sich um einen 3-Phasen-Prozess, der im Folgenden zusammengefasst wird. -## Stage 1. Technical Integration +## Stufe1. Technische Integration -- Please visit [New Chain Integration](/indexing/new-chain-integration/) for information on `graph-node` support for new chains. -- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. +- Bitte besuchen Sie [Neue Kettenintegration](/indexing/new-chain-integration/), um Informationen zur `graph-node`-Unterstützung für neue Ketten zu erhalten. +- Teams leiten den Prozess der Protokollintegration ein, indem sie ein Forumsthema [hier](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) erstellen (Unterkategorie Neue Datenquellen unter Governance & GIPs). Die Verwendung der Standardvorlage für das Forum ist obligatorisch. -## Stage 2. Integration Validation +## Stufe 2. Validierung der Integration -- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON-RPC, Firehose or Substreams endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. -- Graph Indexers test the integration on The Graph's testnet. -- Core developers and Indexers monitor stability, performance, and data determinism. +- Die Teams arbeiten mit den Kernentwicklern, Graph Foundation und den Betreibern von GUIs und Netzwerk-Gateways wie [Subgraph Studio] (https://thegraph.com/studio/) zusammen, um einen reibungslosen Integrationsprozess zu gewährleisten. Dazu gehört die Bereitstellung der notwendigen Backend-Infrastruktur, wie z.B. die JSON-RPC-, Firehose- oder Substreams-Endpunkte der integrierenden Kette. Teams, die eine solche Infrastruktur nicht selbst hosten wollen, können die Community der Knotenbetreiber (Indexer) von The Graph nutzen, um dies zu tun, wobei die Foundation helfen kann. +- Graph Indexer testen die Integration im Testnetz von The Graph. +- Kernentwickler und Indexer überwachen Stabilität, Leistung und Datendeterminismus. -## Stage 3. Mainnet Integration +## Stufe 3. Mainnet-Integration -- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). -- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. +- Teams schlagen die Mainnet-Integration vor, indem sie einen Vorschlag zur Verbesserung des Graphen (Graph Improvement Proposal, GIP) einreichen und einen Pull-Request (PR) auf der [Funktionsunterstützungsmatrix] (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) initiieren (weitere Details unter dem Link). 
+- The Graph Council prüft den Antrag und genehmigt die Mainnet-Unterstützung, was zu einer erfolgreichen Stufe 2 und einem positiven Feedback der Community führt.

---

-If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers).
+Wenn der Prozess entmutigend erscheint, machen Sie sich keine Sorgen! Die The Graph Foundation ist bestrebt, Integratoren zu unterstützen, indem sie die Zusammenarbeit fördert, ihnen wichtige Informationen zur Verfügung stellt und sie durch verschiedene Phasen führt, einschließlich der Navigation durch Governance-Prozesse wie Graph Improvement Proposals (GIPs) und Pull Requests. Wenn Sie Fragen haben, wenden Sie sich bitte an [info@thegraph.foundation] (mailto:info@thegraph.foundation) oder über Discord (entweder an Pedro, ein Mitglied der The Graph Foundation, IndexerDAO oder andere Kernentwickler).

-Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution!
+Sind Sie bereit, die Zukunft von The Graph Network zu gestalten? [Starten Sie jetzt Ihren Vorschlag] (https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) und werden Sie Teil der web3-Revolution!

---

-## Frequently Asked Questions
+## Häufig gestellte Fragen

-### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)?
+### 1. In welchem Zusammenhang steht dies mit der [World of Data Services GIP] (https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)?

-This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`.
+Dieser Prozess bezieht sich auf den Subgraph Data Service, der nur für neue Subgraph-`Data Sources` gilt.

-### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet?
+### 2. Was geschieht, wenn die Unterstützung für Firehose & Substreams erst nach der Unterstützung des Netzes im Mainnet erfolgt?

-This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval.
+Dies würde sich nur auf die Protokollunterstützung für die Indizierung von Rewards auf Subgraphen mit Substreams auswirken. Die neue Firehose-Implementierung müsste im Testnet getestet werden, wobei die für Stufe 2 in diesem GIP beschriebene Methodik anzuwenden wäre. Unter der Annahme, dass die Implementierung performant und zuverlässig ist, wäre ein PR für die [Funktionsunterstützungsmatrix] (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) erforderlich (`Substreams data sources`-Subgraph-Funktion), sowie eine neue GIP für die Protokollunterstützung für die Indizierung von Rewards. Jeder kann die PR und GIP erstellen; die Foundation würde bei der Genehmigung durch den Rat helfen.

-### 3. How much time will the process of reaching full protocol support take?
+### 3. Wie viel Zeit wird der Prozess bis zur vollständigen Unterstützung des Protokolls in Anspruch nehmen?

-The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback.
+Es wird erwartet, dass die Zeit bis zum Mainnet mehrere Wochen betragen wird, je nachdem, wann die Integration entwickelt wird, ob zusätzliche Forschung erforderlich ist, ob Tests und Fehlerkorrekturen durchgeführt werden müssen und, wie immer, je nach dem Zeitpunkt des Governance-Prozesses, der ein Feedback der Community erfordert.

-Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process.
+Die Unterstützung des Protokolls für die Indexer Rewards hängt von der Bandbreite der Beteiligten ab, um mit dem Testen, dem Sammeln von Feedback und der Bearbeitung von Beiträgen zur Kern-Codebasis fortzufahren, falls zutreffend. Dies steht in direktem Zusammenhang mit der Reife der Integration und der Reaktionsfähigkeit des Integrationsteams (bei dem es sich um das Team hinter der RPC/Firehose-Implementierung handeln kann oder auch nicht). Die Foundation steht während des gesamten Prozesses unterstützend zur Seite.

-### 4. How will priorities be handled?
+### 4. Wie werden die Prioritäten gehandhabt?

-Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process.
+Ähnlich wie bei Punkt 3 hängt dies von der allgemeinen Bereitschaft und der Bandbreite der beteiligten Akteure ab. Ein Datenbeispiel: Eine neue Kette mit einer brandneuen Firehose-Implementierung kann länger dauern als Integrationen, die bereits erprobt wurden oder im Governance-Prozess weiter fortgeschritten sind.
From 710aed9d09fd0656827ec4748fcb109ece3647e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:06 -0500 Subject: [PATCH 0349/1534] New translations chain-integration-overview.mdx (Russian) --- .../src/pages/ru/indexing/chain-integration-overview.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ru/indexing/chain-integration-overview.mdx b/website/src/pages/ru/indexing/chain-integration-overview.mdx index cb7944b336ae..3ee1ef3bc4bc 100644 --- a/website/src/pages/ru/indexing/chain-integration-overview.mdx +++ b/website/src/pages/ru/indexing/chain-integration-overview.mdx @@ -6,12 +6,12 @@ title: Chain Integration Process Overview ## Stage 1. Technical Integration -- Please visit [New Chain Integration](/indexing/new-chain-integration/) for information on `graph-node` support for new chains. +- Информацию о поддержке `graph-node` для новых чейнов см. на странице [Интеграция новых чейнов](/indexing/new-chain-integration/). - Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. ## Stage 2. Integration Validation -- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON-RPC, Firehose or Substreams endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Команды сотрудничают с разработчиками ядра, Graph Foundation и операторами графических интерфейсов и сетевых шлюзов, такими как [Subgraph Studio](https://thegraph.com/studio/), чтобы обеспечить плавный процесс интеграции. Это предполагает предоставление необходимой серверной инфраструктуры, такой как конечные точки JSON-RPC, Firehose или Substreams, интегрирующей чейны. Команды, желающие избежать самостоятельного размещения такой инфраструктуры, могут использовать для этого сообщество операторов нод (индексаторов) The Graph, в чем может помочь Foundation. - Graph Indexers test the integration on The Graph's testnet. - Core developers and Indexers monitor stability, performance, and data determinism. @@ -38,7 +38,7 @@ This process is related to the Subgraph Data Service, applicable only to new Sub This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. -### 3. How much time will the process of reaching full protocol support take? +### 3. Сколько времени займет процесс достижения полной поддержки протокола? 
The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. @@ -46,4 +46,4 @@ Protocol support for indexing rewards depends on the stakeholders' bandwidth to ### 4. How will priorities be handled? -Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. +Подобно пункту #3, это будет зависеть от общей готовности и пропускной способности заинтересованных сторон. Например, новый чейн с совершенно новой реализацией Firehose может занять больше времени, чем интеграции, которые уже прошли испытания или находятся на более поздней стадии процесса управления. From 3864dfc036c1a6b21f630634ca353c8ff22d3102 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:09 -0500 Subject: [PATCH 0350/1534] New translations chain-integration-overview.mdx (Chinese Simplified) --- .../pages/zh/indexing/chain-integration-overview.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/src/pages/zh/indexing/chain-integration-overview.mdx b/website/src/pages/zh/indexing/chain-integration-overview.mdx index 3caa15d11f90..425fdaced82a 100644 --- a/website/src/pages/zh/indexing/chain-integration-overview.mdx +++ b/website/src/pages/zh/indexing/chain-integration-overview.mdx @@ -7,22 +7,22 @@ title: 链集成过程概述 ## 阶段1:技术集成 - Please visit [New Chain Integration](/indexing/new-chain-integration/) for information on `graph-node` support for new chains. -- 团队通过在[here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71)(治理与GIPs下的新数据源子类别)创建一个论坛帖子来启动协议集成过程。强制使用默认的论坛模板。 +- 团队通过在[此处](https://forum.thegraph.com/c/governance-gips/new-chain-support/71)(治理与GIPs下的新数据源子类别)创建一个论坛帖子来启动协议集成过程。强制使用默认的论坛模板。 ## 阶段2:集成验证 - Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON-RPC, Firehose or Substreams endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. 
-- Graph索引人在The Graph的测试网上测试集成。
+- Graph索引人在Graph的测试网上测试集成。
 - 核心开发者和索引人监控稳定性、性能和数据确定性。

 ## 阶段3:主网集成

-- 团队通过提交一个Graph Improvement Proposal (GIP) 并在 [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) 上发起一个拉取请求 (PR) 来提议主网集成(更多详细信息请查看链接)。
-- The Graph Council(The Graph理事会)审查请求,并在成功完成第2阶段并获得积极社区反馈的情况下批准主网支持。
+- 团队通过提交一个Graph改进建议(GIP) 并在 特征支持矩阵](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) 上发起一个拉取请求 (PR) 来提议主网集成(更多详细信息请查看链接)。
+- Graph Council(Graph理事会)审查请求,并在成功完成第2阶段并获得积极社区反馈的情况下批准主网支持。

 ---

-如果整个流程看起来令人望而生畏,不用担心!The Graph Foundation致力于通过促进合作、提供重要信息并指导各个阶段的过程来支持集成者,包括引导他们参与治理流程,如Graph Improvement Proposals (GIPs) 和拉取请求。如果您有任何问题,请通过 [info@thegraph.foundation](mailto:info@thegraph.foundation) 或通过Discord(可以联系Pedro、The Graph Foundation成员、IndexerDAO或其他核心开发者)与我们联系。
+如果整个流程看起来令人望而生畏,不用担心!Graph 基金会致力于通过促进合作、提供重要信息并指导各个阶段的过程来支持集成者,包括引导他们参与治理流程,如Graph改进建议(GIPs) 和拉取请求。如果您有任何问题,请通过 [info@thegraph.foundation](mailto:info@thegraph.foundation) 或通过Discord(可以联系Pedro、Graph基金会成员、索引人DAO或其他核心开发者)与我们联系。

 准备好塑造The Graph Network的未来了吗?[立即开始您的提案](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md),成为Web3革命的一部分吧!

 ### 2. 如果在主网上支持网络之后再支持 Firehose 和 Substreams,会发生什么情况?

-这只会影响 Substreams 驱动的子图上的索引奖励的协议支持。新的 Firehose 实现需要在测试网上进行测试,遵循了本 GIP 中第二阶段所概述的方法论。同样地,假设实现是高性能且可靠的,那么需要在 [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) 上提出 PR(`Substreams 数据源` 子图特性),以及一个新的 GIP 来支持索引奖励的协议。任何人都可以创建这个 PR 和 GIP;基金会将协助获得理事会的批准。
+这只会影响 Substreams 驱动的子图上的索引奖励的协议支持。新的 Firehose 实现需要在测试网上进行测试,遵循了本 GIP 中第二阶段所概述的方法论。同样地,假设实现是高性能且可靠的,那么需要在 [特征支持矩阵](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) 上提出 PR(`Substreams 数据源` 子图特性),以及一个新的 GIP 来支持索引奖励的协议。任何人都可以创建这个 PR 和 GIP;基金会将协助获得理事会的批准。

 ### 3. How much time will the process of reaching full protocol support take?

From cfef0db26131bbf17491a14f8707a88e6234f701 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:10 -0500
Subject: [PATCH 0351/1534] New translations chain-integration-overview.mdx
 (Urdu (Pakistan))

---
 website/src/pages/ur/indexing/chain-integration-overview.mdx | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/website/src/pages/ur/indexing/chain-integration-overview.mdx b/website/src/pages/ur/indexing/chain-integration-overview.mdx
index e348639e9efa..f02f86479860 100644
--- a/website/src/pages/ur/indexing/chain-integration-overview.mdx
+++ b/website/src/pages/ur/indexing/chain-integration-overview.mdx
@@ -7,7 +7,8 @@ title: چین انٹیگریشن کے عمل کا جائزہ

 ## مرحلہ 1. تکنیکی انٹیگریشن

 - Please visit [New Chain Integration](/indexing/new-chain-integration/) for information on `graph-node` support for new chains.
-- ٹیمیں فورم تھریڈ بنا کر پروٹوکول انٹیگریشن کا عمل شروع کرتی ہیں [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71)(گورننس اور GIPs کے تحت نئے ڈیٹا ذرائع ذیلی زمرہ) ۔ پہلے سے طے شدہ فورم ٹیمپلیٹ کا استعمال لازمی ہے.
+- ٹیمیں فورم تھریڈ بنا کر پروٹوکول انٹیگریشن کا عمل شروع کرتی ہیں [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71)(گورننس اور GIPs کے تحت نئے ڈیٹا ذرائع ذیلی زمرہ)
+  ۔ پہلے سے طے شدہ فورم ٹیمپلیٹ کا استعمال لازمی ہے.

 ## مرحلہ 2۔ انٹیگریشن کی توثیق

From 5c4201589a95882d1f82b91134f211c3590dc6cd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:12 -0500
Subject: [PATCH 0352/1534] New translations chain-integration-overview.mdx
 (Hindi)

---
 website/src/pages/hi/indexing/chain-integration-overview.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/src/pages/hi/indexing/chain-integration-overview.mdx b/website/src/pages/hi/indexing/chain-integration-overview.mdx
index c8bac7343bad..6a7c06a71a07 100644
--- a/website/src/pages/hi/indexing/chain-integration-overview.mdx
+++ b/website/src/pages/hi/indexing/chain-integration-overview.mdx
@@ -6,7 +6,7 @@ A transparent and governance-based integration process was designed for blockcha

 ## Stage 1. Technical Integration

-- कृपया [New Chain Integration](/indexing/new-chain-integration/) पर जाएं नई chains के लिए `graph-node` समर्थन के बारे में जानकारी के लिए.
+- कृपया `ग्राफ-नोड` द्वारा नए chain समर्थन के लिए [New Chain इंटीग्रेशन](/indexing/new-chain-integration/) पर जाएं।
 - Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory.

 ## Stage 2. Integration Validation

From 1eb0e3bc79573cd0d28aa6e223cefc4d59731132 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:13 -0500
Subject: [PATCH 0353/1534] New translations new-chain-integration.mdx
 (Romanian)

---
 .../ro/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/ro/indexing/new-chain-integration.mdx b/website/src/pages/ro/indexing/new-chain-integration.mdx
index f5c5cba520d5..7c9f8a5c65c4 100644
--- a/website/src/pages/ro/indexing/new-chain-integration.mdx
+++ b/website/src/pages/ro/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, in a JSON-RPC batch request
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_

 ### 2. Firehose Integration

 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.

-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.

-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains

 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.

 ![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png)

 > NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up.

 ## EVM considerations - Difference between JSON-RPC & Firehose

 While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing.

 - All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes.

 > NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers)

 ## Graph Node Configuration

 Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph.

 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)

-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL

    > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.

 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/

-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Create a simple example subgraph. Some options are below:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs.
-
 ## Substreams-powered Subgraphs

 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.

From 11ada0dba8cf73c97857ebc234392adfd25c689a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:14 -0500
Subject: [PATCH 0354/1534] New translations new-chain-integration.mdx
 (French)

---
 .../fr/indexing/new-chain-integration.mdx     | 66 ++++++++++-----------
 1 file changed, 28 insertions(+), 38 deletions(-)

diff --git a/website/src/pages/fr/indexing/new-chain-integration.mdx b/website/src/pages/fr/indexing/new-chain-integration.mdx
index be429aaa1369..9ac16c2a71e9 100644
--- a/website/src/pages/fr/indexing/new-chain-integration.mdx
+++ b/website/src/pages/fr/indexing/new-chain-integration.mdx
@@ -1,80 +1,70 @@
 ---
-title: New Chain Integration
+title: Intégration d'une Nouvelle Chaîne
 ---

-Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies:
+Les chaînes peuvent apporter le support des subgraphs à leur écosystème en démarrant une nouvelle intégration `graph-node`. Les subgraphs sont un outil d'indexation puissant qui ouvre un monde de possibilités pour les développeurs. Graph Node indexe déjà les données des chaînes listées ici. Si vous êtes intéressé par une nouvelle intégration, il existe 2 stratégies d'intégration :

 1. **EVM JSON-RPC**
-2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms.
+2. **Firehose** : toutes les solutions d'intégration Firehose incluent Substreams, un moteur de streaming à grande échelle basé sur Firehose avec prise en charge native de `graph-node`, permettant des transformations parallélisées.

-> Note that while the recommended approach is to develop a new Firehose for all new chains, it is only required for non-EVM chains.
+> Notez que même si l’approche recommandée consiste à développer un nouveau Firehose pour toutes les nouvelles chaînes, elle n’est requise que pour les chaînes non EVM.

-## Integration Strategies
+## Stratégies d'intégration

 ### 1. EVM JSON-RPC

-If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain.
+Si la blockchain est équivalente à EVM et que le client/nœud expose l'API JSON-RPC EVM standard, Graph Node devrait pouvoir indexer la nouvelle chaîne.
 #### Tester un EVM JSON-RPC

 Afin que Graph Node puisse ingérer des données provenant d'une chaîne EVM, le nœud RPC doit exposer les méthodes JSON-RPC EVM suivantes :

 - `eth_getLogs`
 - `eth_call` (pour les blocs historiques, avec EIP-1898 - nécessite un nœud d'archivage)
 - `eth_getBlockByNumber`
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, in a JSON-RPC batch request
 - `trace_filter` _(traçage limité et optionnellement requis pour Graph Node)_

 ### 2. Intégration Firehose

 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) est une couche d'extraction de nouvelle génération. Elle collecte l'historique dans des fichiers plats et des flux en temps réel. La technologie Firehose remplace ces appels d'API d'interrogation par un flux de données utilisant un modèle push qui envoie les données au nœud d'indexation plus rapidement. Cela permet d'augmenter la vitesse de synchronisation et d'indexation.

 > REMARQUE : toutes les intégrations effectuées par l'équipe StreamingFast incluent la maintenance du protocole de réplication Firehose dans la base de code de la chaîne. StreamingFast suit toutes les modifications et publie les binaires lorsque vous modifiez le code et lorsque StreamingFast modifie le code. Cela comprend la publication des binaires Firehose/Substreams pour le protocole, la maintenance des modules Substreams pour le modèle de bloc de la chaîne et la publication des binaires pour le nœud de blockchain avec instrumentation si nécessaire.

+#### Intégration des Chaînes Non-EVM
+
+L’approche principale pour intégrer Firehose dans ces chaînes repose sur une stratégie de polling RPC. L'algorithme de polling prédit l’arrivée des nouveaux blocs et ajuste dynamiquement le taux d’interrogation, ce qui garantit une solution à très faible latence et efficace. Pour obtenir de l'aide concernant l'intégration et la maintenance du Firehose, contactez [l'équipe StreamingFast ](https://www.streamingfast.io/firehose-integration-program). Les nouvelles chaînes et leurs intégrateurs apprécieront [la gestion des forks](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) et les capacités massives d'indexation parallélisée que Firehose et Substreams apportent à leur écosystème.
+
+#### Instrumentation spécifique pour les Blockchains EVM (`geth`)

 Pour les chaînes EVM, il existe un niveau de données plus approfondi qui peut être atteint grâce à `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), une collaboration entre Go-Ethereum et StreamingFast, dans la construction d'un système de traçage de transaction riche et à haut débit. Le Live Tracer est la solution la plus complète, qui permet d'obtenir des détails sur les blocs [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425). Cela permet de nouveaux paradigmes d'indexation, comme la recherche de modèles d'événements basés sur des changements d'état, des appels, des arbres d'appels parents, ou le déclenchement d'événements basés sur des changements de variables réelles dans un contrat intelligent.

 ![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png)

 > REMARQUE : cette amélioration du Firehose nécessite que les chaînes utilisent le moteur EVM `geth version 1.13.0` et supérieur.

 ## Considérations sur EVM - Différence entre JSON-RPC et Firehose

-While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing.
+Bien que le JSON-RPC et le Firehose soient tous deux adaptés aux subgraphs, un Firehose est toujours nécessaire pour les développeurs qui souhaitent construire avec [Substreams](https://substreams.streamingfast.io). La prise en charge de Substreams permet aux développeurs de construire des [subgraphs alimentés par Substreams](/subgraphs/cookbook/substreams-powered-subgraphs/) pour la nouvelle chaîne, et a le potentiel d'améliorer les performances de vos subgraphs. De plus, Firehose - en tant que remplacement direct de la couche d'extraction JSON-RPC de `graph-node` - réduit de 90% le nombre d'appels RPC requis pour l'indexation générale.

 - Tous ces appels et allers-retours `getLogs` sont remplacés par un seul flux arrivant au cœur de `graph-node` ; un modèle de bloc unique pour tous les subgraphs qu'il traite.

 > NOTEZ: une intégration basée sur Firehose pour les chaînes EVM nécessitera toujours que les indexeurs exécutent le nœud RPC d'archivage de la chaîne pour indexer correctement les subgraphs. Cela est dû à l'incapacité de Firehose à fournir un état de contrat intelligent généralement accessible par la méthode RPC `eth_calls`. (Il convient de rappeler que les `eth_call` ne sont pas une bonne pratique pour les développeurs)

 ## Configuration Graph Node

 La configuration de Graph Node est aussi simple que la préparation de votre environnement local. Une fois votre environnement local défini, vous pouvez tester l'intégration en déployant localement un subgraph.

 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)

 2. Modifier [cette ligne] (https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) pour inclure le nouveau nom du réseau et l'URL JSON-RPC ou Firehose de l'EVM.

    > Ne changez pas le nom de la var env elle-même. Il doit rester `ethereum`  même si le nom du réseau est différent.

 3. Exécutez un nœud IPFS ou utilisez celui utilisé par The Graph : https://api.thegraph.com/ipfs/

-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Créez un exemple de subgraph simple. Certaines options sont ci-dessous :
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Graph Node devrait synchroniser le subgraph déployé s'il n'y a pas d'erreurs. Laissez-lui le temps de se synchroniser, puis envoyez des requêtes GraphQL au point de terminaison de l'API indiqué dans les journaux.
-
 ## Subgraphs alimentés par des substreams

 Pour les intégrations Firehose/Substreams pilotées par StreamingFast, la prise en charge de base des modules Substreams fondamentaux (par exemple, les transactions décodées, les logs et les événements smart-contract) et les outils codegen Substreams sont inclus. Ces outils permettent d'activer des [subgraphs alimentés par Substreams](/substreams/sps/introduction/). Suivez le [Guide pratique](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) et exécutez `substreams codegen subgraph` pour expérimenter les outils codegen par vous-même.

From 8ea62fff0c4e44af919c6b77a6da62d5f544e4ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:15 -0500
Subject: [PATCH 0355/1534] New translations new-chain-integration.mdx
 (Spanish)

---
 .../es/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/es/indexing/new-chain-integration.mdx b/website/src/pages/es/indexing/new-chain-integration.mdx
index d8942f2818d3..070442ca3a6e 100644
--- a/website/src/pages/es/indexing/new-chain-integration.mdx
+++ b/website/src/pages/es/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, en una solicitud por lotes JSON-RPC
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_

 ### 2. Firehose Integration

 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.

-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.

-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains

 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.

 ![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png)

 > NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up.

 ## EVM considerations - Difference between JSON-RPC & Firehose

 While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing.

 - All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes.

 > NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers)

 ## Graph Node Configuration

 Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph.

 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)

-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL

    > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.

 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/

-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Crea un subgrafo simple de prueba. Algunas opciones están a continuación:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Graph Node debería sincronizar el subgrafo implementado si no hay errores. Dale tiempo para que se sincronice y luego envíe algunas queries GraphQL al punto final de la API impreso en los registros.
-
 ## Substreams-powered Subgraphs

 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.

From d916151840dbbac46bc0ef7ed73e22255b501927 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:16 -0500
Subject: [PATCH 0356/1534] New translations new-chain-integration.mdx
 (Arabic)

---
 .../ar/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/ar/indexing/new-chain-integration.mdx b/website/src/pages/ar/indexing/new-chain-integration.mdx
index 90fd989fffce..60ac68afc55f 100644
--- a/website/src/pages/ar/indexing/new-chain-integration.mdx
+++ b/website/src/pages/ar/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`، ضمن طلب دفعة استدعاء الإجراء عن بُعد باستخدام تمثيل كائنات جافا سكريبت
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_

 ### 2. Firehose Integration

 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.

-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.

-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains

 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.

 ![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png)

 > NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up.

 ## EVM considerations - Difference between JSON-RPC & Firehose

 While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing.

 - All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes.

 > NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers)

 ## Graph Node Configuration

 Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph.

 1. [استنسخ عقدة الغراف](https://github.com/graphprotocol/graph-node)

-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL

    > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.

 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/

-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. قم بإنشاء مثالًا بسيطًا للغراف الفرعي. بعض الخيارات المتاحة هي كالتالي:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-إذا لم تكن هناك أخطاء يجب أن يقوم عقدة الغراف بمزامنة الغراف الفرعي المنشور. قم بمنحه بعض الوقت لإتمام عملية المزامنة، ثم قم بإرسال بعض استعلامات لغة الإستعلام للغراف (GraphQL) إلى نقطة نهاية واجهة برمجة التطبيقات الموجودة في السجلات.
-
 ## Substreams-powered Subgraphs

 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.

From 7c8412d29ff62dcc448436ef9fd7461155b8d256 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:17 -0500
Subject: [PATCH 0357/1534] New translations new-chain-integration.mdx (Czech)

---
 .../cs/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/cs/indexing/new-chain-integration.mdx b/website/src/pages/cs/indexing/new-chain-integration.mdx
index 082548e9e502..9745156c2c00 100644
--- a/website/src/pages/cs/indexing/new-chain-integration.mdx
+++ b/website/src/pages/cs/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, in a JSON-RPC batch request
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_

 ### 2. Firehose Integration

 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.

-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.
-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains

 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.

 ![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png)

 > NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up.

 ## EVM considerations - Difference between JSON-RPC & Firehose

 While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing.

 - All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes.

 > NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers)

 ## Graph Node Configuration

 Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph.

 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)

-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL

    > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.

 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/

-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Vytvořte jednoduchý příklad podgrafu. Některé možnosti jsou uvedeny níže:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Pokud nedošlo k chybám, měl by uzel Graf synchronizovat nasazený podgraf. Dejte mu čas na synchronizaci a poté odešlete několik dotazů GraphQL na koncový bod API vypsaný v protokolech.
-
 ## Substreams-powered Subgraphs

 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.

From d18791057d54412bda1af29b41bfd353c37964bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:18 -0500
Subject: [PATCH 0358/1534] New translations new-chain-integration.mdx
 (German)

---
 .../de/indexing/new-chain-integration.mdx     | 72 ++++++++++-----------
 1 file changed, 31 insertions(+), 41 deletions(-)

diff --git a/website/src/pages/de/indexing/new-chain-integration.mdx b/website/src/pages/de/indexing/new-chain-integration.mdx
index f5c5cba520d5..7b577d246783 100644
--- a/website/src/pages/de/indexing/new-chain-integration.mdx
+++ b/website/src/pages/de/indexing/new-chain-integration.mdx
@@ -1,80 +1,70 @@
 ---
-title: New Chain Integration
+title: Integration neuer Ketten
 ---

-Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies:
+Ketten können die Unterstützung von Subgraphen in ihr Ökosystem einbringen, indem sie eine neue `graph-node` Integration starten. Subgraphen sind ein leistungsfähiges Indizierungswerkzeug, das Entwicklern eine Welt voller Möglichkeiten eröffnet. Graph Node indiziert bereits Daten von den hier aufgeführten Ketten. Wenn Sie an einer neuen Integration interessiert sind, gibt es 2 Integrationsstrategien:

 1. **EVM JSON-RPC**
-2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms.
+2. **Firehose**: Alle Firehose-Integrationslösungen umfassen Substreams, eine groß angelegte Streaming-Engine auf der Grundlage von Firehose mit nativer `graph-node`-Unterstützung, die parallelisierte Transformationen ermöglicht.

-> Note that while the recommended approach is to develop a new Firehose for all new chains, it is only required for non-EVM chains.
+> Beachten Sie, dass es zwar empfohlen wird, einen neuen Firehose für alle neuen Ketten zu entwickeln, dies aber nur für Nicht-EVM-Ketten erforderlich ist.

-## Integration Strategies
+## Strategien zur Integration

 ### 1. EVM JSON-RPC

-If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain.
+Wenn die Blockchain EVM-äquivalent ist und der Client/Knoten die standardmäßige EVM-JSON-RPC-API bereitstellt, sollte Graph Node in der Lage sein, die neue Kette zu indizieren.
-#### Testing an EVM JSON-RPC +#### Testen eines EVM JSON-RPC -For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON-RPC methods: +Damit Graph Node Daten aus einer EVM-Kette aufnehmen kann, muss der RPC-Knoten die folgenden EVM JSON-RPC-Methoden bereitstellen: - `eth_getLogs` -- `eth_call` (for historical blocks, with EIP-1898 - requires archive node) +- `eth_call` (für historische Blöcke, mit EIP-1898 - erfordert Archivknoten) - `eth_getBlockByNumber` - `eth_getBlockByHash` - `net_version` -- `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(optionally required for Graph Node to support call handlers)* +- `eth_getTransactionReceipt`, in einem JSON-RPC-Batch-Antrag +- `trace_filter`  _(begrenztes Tracing und optional erforderlich für Graph Node)_ ### 2. Firehose Integration -[Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. +[Firehose] (https://firehose.streamingfast.io/firehose-setup/overview) ist eine Extraktionsschicht der nächsten Generation. Sie sammelt den Verlauf in Flat Files und Streams in Echtzeit. Die Firehose-Technologie ersetzt die abfragenden API-Aufrufe durch einen Datenstrom, der ein Push-Modell verwendet, das Daten schneller an den Indexer-Knoten sendet. Dies trägt dazu bei, die Geschwindigkeit der Synchronisierung und Indizierung zu erhöhen. -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. +> HINWEIS: Alle vom StreamingFast-Team durchgeführten Integrationen beinhalten die Wartung des Firehose-Replikationsprotokolls in der Codebasis der Kette. StreamingFast verfolgt alle Änderungen und gibt Binärdateien frei, wenn Sie den Code ändern und wenn StreamingFast den Code ändert. Dies umfasst die Freigabe von Firehose/Substreams-Binärdateien für das Protokoll, die Wartung von Substreams-Modulen für das Blockmodell der Kette und die Freigabe von Binärdateien für den Blockchain-Knoten mit Instrumentierung, falls erforderlich. -> NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. 
+#### Integration für Nicht-EVM-Ketten -#### Specific Firehose Instrumentation for EVM (`geth`) chains +Die primäre Methode zur Integration des Firehose in Ketten besteht in der Verwendung einer RPC-Polling-Strategie. Unser Polling-Algorithmus sagt voraus, wann ein neuer Block eintreffen wird und erhöht die Rate, mit der er nach einem neuen Block in der Nähe dieses Zeitpunkts sucht, was eine sehr effiziente Lösung mit geringer Latenz darstellt. Wenn Sie Hilfe bei der Integration und Wartung von Firehose benötigen, wenden Sie sich an das [StreamingFast-Team] (https://www.streamingfast.io/firehose-integration-program). Neue Ketten und ihre Integratoren werden die [Fork-Bewusstsein](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) und die massiv parallelisierten Indexierungsfähigkeiten zu schätzen wissen, die Firehose und Substreams in ihr Ökosystem einbringen. -For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. +#### Spezifische Instrumentierung für EVM (`geth`) Ketten -![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png) +Für EVM-Ketten gibt es eine tiefere Ebene von Daten, die durch den `geth` [Live-Tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), eine Zusammenarbeit zwischen Go-Ethereum und StreamingFast, beim Aufbau eines durchsatzstarken und umfangreichen Transaktionsverfolgungssystems erreicht werden kann. Der Live-Tracer ist die umfassendste Lösung, die zu [erweiterten](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) Blockdetails führt. Dies ermöglicht neue Indizierungsparadigmen, wie den Musterabgleich von Ereignissen auf der Grundlage von Zustandsänderungen, Aufrufen, übergeordneten Aufrufbäumen oder das Auslösen von Ereignissen auf der Grundlage von Änderungen der tatsächlichen Variablen in einem Smart Contract. -> NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up. +![Basisblock vs. Erweiterter Block](/img/extended-vs-base-substreams-blocks.png) -## EVM considerations - Difference between JSON-RPC & Firehose +> HINWEIS: Diese Verbesserung des Firehose erfordert, dass die Ketten die EVM-Engine `geth Version 1.13.0` und höher verwenden. -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. 
+## EVM-Überlegungen - Unterschied zwischen JSON-RPC und Firehose

-Während JSON-RPC und Firehose beide für Subgraphen geeignet sind, ist für Entwickler, die mit [Substreams](https://substreams.streamingfast.io) bauen wollen, immer ein Firehose erforderlich. Die Unterstützung von Substreams ermöglicht es Entwicklern, [Substreams-betriebene Subgraphen](/subgraphs/cookbook/substreams-powered-subgraphs/) für die neue Kette zu bauen, und hat das Potenzial, die Leistung Ihrer Subgraphen zu verbessern. Darüber hinaus reduziert Firehose - als Ersatz für die JSON-RPC-Extraktionsschicht von `graph-node` - die Anzahl der RPC-Aufrufe, die für die allgemeine Indizierung erforderlich sind, um 90%.
-- All diese `getLogs`-Aufrufe und Roundtrips werden durch einen einzigen Stream ersetzt, der im Herzen von `graph-node` ankommt; ein einziges Blockmodell für alle Subgraphen, die es verarbeitet.
-> HINWEIS: Bei einer Firehose-basierten Integration für EVM-Ketten müssen Indexer weiterhin den Archiv-RPC-Knoten der Kette ausführen, um Subgraphen ordnungsgemäß zu indizieren. Dies liegt daran, dass der Firehose nicht in der Lage ist, den Smart-Contract-Status bereitzustellen, der normalerweise über die RPC-Methode „eth_call“ zugänglich ist. (Es ist erwähnenswert, dass `eth_calls` keine gute Praxis für Entwickler sind)
-## Graph-Node Konfiguration
-Die Konfiguration von Graph Node ist so einfach wie die Vorbereitung Ihrer lokalen Umgebung. Sobald Ihre lokale Umgebung eingerichtet ist, können Sie die Integration testen, indem Sie einen Subgraphen lokal bereitstellen.
-1. [Graph Node klonen](https://github.com/graphprotocol/graph-node)
-2. Ändern Sie [diese Zeile] (https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22), um den neuen Netzwerknamen und die EVM JSON-RPC- oder Firehose-konforme URL aufzunehmen
-   > Ändern Sie nicht den Namen der Env-Variable selbst. Er muss `ethereum` bleiben, auch wenn der Netzwerkname anders lautet.
-3. Führen Sie einen IPFS-Knoten aus oder verwenden Sie den von The Graph verwendeten: https://api.thegraph.com/ipfs/
-### Testing an EVM JSON-RPC by locally deploying a subgraph
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Create a simple example subgraph. Some options are below:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
+## Substreams-getriebene Subgraphen

-Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs.
-
-## Substreams-powered Subgraphs
-
-For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.
+Für StreamingFast-geführte Firehose/Substreams-Integrationen sind grundlegende Unterstützung für grundlegende Substreams-Module (z. B. entschlüsselte Transaktionen, Protokolle und Smart-Contract-Ereignisse) und Substreams-Codegen-Tools enthalten. Mit diesen Tools können Sie [Substreams-getriebene Subgraphen](/substreams/sps/introduction/) aktivieren. Folgen Sie dem [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) und führen Sie `substreams codegen subgraph` aus, um die codegen-Tools selbst zu erleben.
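Editor's aside between patches: each of the diffs in this series references the list of EVM JSON-RPC methods that Graph Node requires. A candidate node can be spot-checked against that list before being wired into `graph-node`. The sketch below is illustrative only; `RPC_URL` is a placeholder, and the block range is arbitrary.

```bash
# Minimal smoke test of JSON-RPC methods Graph Node depends on.
# RPC_URL is a placeholder; point it at the candidate chain's node.
RPC_URL="http://localhost:8545"

# net_version: network identifier
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"net_version","params":[]}'

# eth_getBlockByNumber: latest block, without full transaction objects
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":2,"method":"eth_getBlockByNumber","params":["latest",false]}'

# eth_getLogs: a small, bounded range keeps the call cheap
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":3,"method":"eth_getLogs","params":[{"fromBlock":"0x1","toBlock":"0x2"}]}'
```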
From d916151840dbbac46bc0ef7ed73e22255b501927 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:19 -0500
Subject: [PATCH 0359/1534] New translations new-chain-integration.mdx
 (Italian)
---
 .../it/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/it/indexing/new-chain-integration.mdx b/website/src/pages/it/indexing/new-chain-integration.mdx
index f5c5cba520d5..7c9f8a5c65c4 100644
--- a/website/src/pages/it/indexing/new-chain-integration.mdx
+++ b/website/src/pages/it/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, in a JSON-RPC batch request
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_
 
 ### 2. Firehose Integration
 
 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.
 
-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.
 
-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains
 
 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.
 
@@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your
 
 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)
 
-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL
 
    > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.
 
 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/
 
-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Create a simple example subgraph. Some options are below:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs.
-
 ## Substreams-powered Subgraphs
 
 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.
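Editor's aside: the "Testing an EVM JSON-RPC by locally deploying a subgraph" section removed by the patch above still describes a useful local flow. A minimal sketch of it follows; the subgraph name and endpoint values are illustrative defaults for a local setup, while the `graph create` and `graph deploy` invocations are taken verbatim from the deleted steps.

```bash
# Local deployment flow from the removed "Testing an EVM JSON-RPC" section.
npm install -g @graphprotocol/graph-cli

SUBGRAPH_NAME="example/gravatar"            # placeholder name
GRAPH_NODE_ENDPOINT="http://localhost:8020" # graph-node admin endpoint (local default)
IPFS_ENDPOINT="http://localhost:5001"       # local IPFS API

graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT
graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT
```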
From dc0a04a9e26e4aac8c21e1a929463bf5c6fb92eb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:20 -0500
Subject: [PATCH 0360/1534] New translations new-chain-integration.mdx
 (Japanese)
---
 .../ja/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/ja/indexing/new-chain-integration.mdx b/website/src/pages/ja/indexing/new-chain-integration.mdx
index 94f1424e9113..d71692271261 100644
--- a/website/src/pages/ja/indexing/new-chain-integration.mdx
+++ b/website/src/pages/ja/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, in a JSON-RPC batch request
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_
 
 ### 2. Firehose Integration
 
 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.
 
-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.
 
-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains
 
 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.
 
@@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your
 
 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)
 
-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL
 
   > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.
 
 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/
 
-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. 簡単なサブグラフの例を作成します。 いくつかのオプションを以下に示します。
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Graph Nodeはエラーがない場合、デプロイされたサブグラフを同期するはずです。同期が完了するのを待ってから、ログに表示されたAPIエンドポイントに対していくつかのGraphQLクエリを送信してください。
-
 ## Substreams-powered Subgraphs
 
 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.
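Editor's aside: step 2 of the "Graph Node Configuration" section, repeated in each patch, edits a network-name-to-URL mapping. A sketch of that mapping follows; the network name `acme-net`, the URLs, and the Postgres credentials are all placeholders, and the exact YAML layout should be taken from the linked `docker-compose.yml`, not from this note.

```bash
# The name->URL mapping behind configuration step 2 (illustrative only).
# In docker/docker-compose.yml the graph-node service carries a line like:
#   ethereum: 'mainnet:http://host.docker.internal:8545'
# Keep the key "ethereum" and change only the network name and URL:
#   ethereum: 'acme-net:http://host.docker.internal:8545'
# Outside Docker, a comparable mapping is passed on the graph-node command line:
graph-node \
  --ethereum-rpc acme-net:http://localhost:8545 \
  --ipfs 127.0.0.1:5001 \
  --postgres-url postgresql://graph:graph@localhost:5432/graph-node
```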
From 901b9fa5d9f5674006b3a9d7584d2cd7a9df29c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:21 -0500
Subject: [PATCH 0361/1534] New translations new-chain-integration.mdx
 (Korean)
---
 .../ko/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/ko/indexing/new-chain-integration.mdx b/website/src/pages/ko/indexing/new-chain-integration.mdx
index f5c5cba520d5..7c9f8a5c65c4 100644
--- a/website/src/pages/ko/indexing/new-chain-integration.mdx
+++ b/website/src/pages/ko/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, in a JSON-RPC batch request
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_
 
 ### 2. Firehose Integration
 
 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.
 
-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.
 
-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains
 
 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.
 
@@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your
 
 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)
 
-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL
 
   > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.
 
 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/
 
-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Create a simple example subgraph. Some options are below:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs.
-
 ## Substreams-powered Subgraphs
 
 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.
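Editor's aside: the method list in these diffs singles out two "archive-grade" requirements, `trace_filter` (for call handlers) and historical `eth_call` with an EIP-1898 block argument. Both can be probed as sketched below; `RPC_URL`, the zero address, and the empty calldata are placeholders, and `trace_filter` is only available on trace-capable clients.

```bash
# Probing the two archive-grade requirements called out above (illustrative).
RPC_URL="http://localhost:8545"

# trace_filter: only needed when subgraphs use call handlers
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"trace_filter","params":[{"fromBlock":"0x1","toBlock":"0x2"}]}'

# eth_call against a historical block, EIP-1898 style: needs an archive node
curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":2,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000000","data":"0x"},{"blockNumber":"0x1"}]}'
```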
From 6f4219fc78e93ce80c9948a9f96b93a086843181 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:22 -0500
Subject: [PATCH 0362/1534] New translations new-chain-integration.mdx
 (Dutch)
---
 .../nl/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/nl/indexing/new-chain-integration.mdx b/website/src/pages/nl/indexing/new-chain-integration.mdx
index f5c5cba520d5..7c9f8a5c65c4 100644
--- a/website/src/pages/nl/indexing/new-chain-integration.mdx
+++ b/website/src/pages/nl/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, in a JSON-RPC batch request
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_
 
 ### 2. Firehose Integration
 
 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.
 
-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.
 
-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains
 
 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.
 
@@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your
 
 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)
 
-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL
 
  > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.
 
 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/
 
-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Create a simple example subgraph. Some options are below:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs.
-
 ## Substreams-powered Subgraphs
 
 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.
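Editor's aside: the closing paragraph of each patch points at `substreams codegen subgraph`. A minimal sketch of that step follows, assuming the `substreams` CLI is installed and the command is run from a directory holding a Substreams manifest, per the linked How-To Guide.

```bash
# Scaffold a Substreams-powered subgraph from an existing Substreams package.
# Assumes a substreams.yaml manifest in the current directory.
substreams codegen subgraph
```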
From 88251cd3fa172aea004ebb87138eca0e2f67d9c6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:23 -0500
Subject: [PATCH 0363/1534] New translations new-chain-integration.mdx
 (Polish)
---
 .../pl/indexing/new-chain-integration.mdx     | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/website/src/pages/pl/indexing/new-chain-integration.mdx b/website/src/pages/pl/indexing/new-chain-integration.mdx
index f5c5cba520d5..7c9f8a5c65c4 100644
--- a/website/src/pages/pl/indexing/new-chain-integration.mdx
+++ b/website/src/pages/pl/indexing/new-chain-integration.mdx
@@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, in a JSON-RPC batch request
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(limited tracing and optionally required for Graph Node)_
 
 ### 2. Firehose Integration
 
 [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.
 
-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
-
 > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.
 
-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+#### Integration for Non-EVM chains
+
+The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+
+#### Specific Instrumentation for EVM (`geth`) chains
 
 For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.
 
@@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your
 
 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node)
 
-2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL
+2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL
 
  > Do not change the env var name itself. It must remain `ethereum` even if the network name is different.
 
 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/
 
-### Testing an EVM JSON-RPC by locally deploying a subgraph
-
-1. Install [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Create a simple example subgraph. Some options are below:
-   1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point
-   2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph)
-3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node.
-4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs.
-
 ## Substreams-powered Subgraphs
 
 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.
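Editor's aside: the method list above notes that `eth_getTransactionReceipt` must be accepted "in a JSON-RPC batch request". A batch is simply a JSON array of request objects, as sketched below; `RPC_URL` and the transaction hashes are placeholders.

```bash
# JSON-RPC batch request: one HTTP call, an array of request objects.
RPC_URL="http://localhost:8545"
TX1="0x<tx-hash-1>"  # placeholder transaction hashes
TX2="0x<tx-hash-2>"

curl -s "$RPC_URL" -H 'Content-Type: application/json' \
  -d "[{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"eth_getTransactionReceipt\",\"params\":[\"$TX1\"]},
       {\"jsonrpc\":\"2.0\",\"id\":2,\"method\":\"eth_getTransactionReceipt\",\"params\":[\"$TX2\"]}]"
```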
From 69a1b0086a552ab47cdd7e231c76fc42476f96f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:24 -0500
Subject: [PATCH 0364/1534] New translations new-chain-integration.mdx
 (Portuguese)
---
 .../pt/indexing/new-chain-integration.mdx     | 26 ++++++------------
 1 file changed, 8 insertions(+), 18 deletions(-)

diff --git a/website/src/pages/pt/indexing/new-chain-integration.mdx b/website/src/pages/pt/indexing/new-chain-integration.mdx
index 8fae626b60e0..f988d296e3f1 100644
--- a/website/src/pages/pt/indexing/new-chain-integration.mdx
+++ b/website/src/pages/pt/indexing/new-chain-integration.mdx
@@ -25,16 +25,18 @@ Para que o Graph Node possa ingerir dados de uma chain EVM, o node RPC deve expo
 - `eth_getBlockByHash`
 - `net_version`
 - `eth_getTransactionReceipt`, em um pedido conjunto em JSON-RPC
-- `trace_filter` (opcional, para que o Graph Node tenha apoio a handlers de chamada)\*
+- `trace_filter` _(tracing limitado, e opcionalmente necessário, para o Graph Node)_
 
 ### 2. Integração do Firehose
 
 O [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) é uma camada de extração de última geração, que coleta históricos em streams e arquivos planos em tempo real. A tecnologia do Firehose substitui estas chamadas de API com um fluxo de dados que utilizam um modelo de empurrão que envia dados ao node de indexação mais rapidamente. Isto ajuda a aumentar a velocidade da sincronização e da indexação.
 
-O método principal de integrar o Firehose a chains é uma estratégia de polling de RPC. O nosso algoritmo de polling preverá quando um bloco novo irá chegar, e aumentará o ritmo em que ele verifica por um novo bloco quando se aproximar daquela hora, o que o faz uma solução de baixa latência muito eficiente. Para ajuda com a integração e a manutenção do Firehose, contacte a [equipa do StreamingFast](https://www.streamingfast.io/firehose-integration-program). Novas chains e os seus integrantes apreciarão a [consciência de fork](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) e as capacidades imensas de indexação paralelizada que o Firehose e os Substreams trazem ao seu ecossistema.
-
 > NOTA: Todas as integrações feitas pela equipa da StreamingFast incluem manutenção para o protocolo de réplica do Firehose no banco de código da chain. O StreamingFast rastreia todas as mudanças e lança binários quando o código é mudado, pelo programador ou pela StreamingFast. Isto inclui o lançamento de binários do Firehose/Substreams para o protocolo, a manutenção dos módulos de Substreams para o modelo de bloco da chain, e o lançamento de binários para o node da blockchain com a instrumentação, caso necessária.
 
+#### Integração para Chains sem ser EVM (Máquina Virtual de Ethereum)
+
+O método principal de integrar o Firehose a chains é uma estratégia de polling de RPC. O nosso algoritmo de polling preverá quando um bloco novo irá chegar, e aumentará o ritmo em que ele verifica por um novo bloco quando se aproximar daquela hora, o que o faz uma solução de baixa latência muito eficiente. Para ajuda com a integração e a manutenção do Firehose, contacte a [equipa do StreamingFast](https://www.streamingfast.io/firehose-integration-program). Novas chains e os seus integrantes apreciarão a [consciência de fork](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) e as capacidades imensas de indexação paralelizada que o Firehose e os Substreams trazem ao seu ecossistema.
+
 #### Instrumentação Específica do Firehose para chains EVM (`geth`)
 
 Para chains EVM, há um nível mais profundo de dados que podem ser alcançados através do [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0) `geth`, uma colaboração entre a Go-Ethereum e a StreamingFast, na construção de um sistema de traços rico e de alto throughput. O Live Tracer é a solução mais compreensiva, o que resulta em detalhes de blocos [Estendidos](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425). Isto permite novos paradigmas de indexação, como correspondência de padrões de eventos com base em mudanças no estado, chamadas, árvores de chamadas de parentes, ou o acionamento de eventos com base nas mudanças nas próprias variáveis em um contrato inteligente.
 
@@ -45,7 +47,7 @@ Para chains EVM, há um nível mais profundo de dados que podem ser alcançados
 
 ## Considerações de EVM - Diferença entre JSON-RPC e Firehose
 
-Enquanto ambos o JSON-RPC e o Firehose são próprios para subgraphs, um Firehose é sempre necessário para programadores que querem construir com [Substreams](https://substreams.streamingfast.io). Apoiar os Substreams permite que programadores construam [subgraphs movidos a Substreams](/subgraphs/cookbook/substreams-powered-subgraphs/) para a nova chain, e tem o potencial de melhorar o desempenho dos seus subgraphs. Além disto, o Firehose — como um substituto pronto para a camada de extração JSON-RPC do `graph-node` — reduz em 90% o número de chamadas RPC exigidas para indexação geral.
+Enquanto ambos o JSON-RPC e o Firehose são próprios para subgraphs, um Firehose é sempre necessário para programadores que querem construir com [Substreams](https://substreams.streamingfast.io). Apoiar o Substreams permite que programadores construam [subgraphs movidos a Substreams](/subgraphs/cookbook/substreams-powered-subgraphs/) para a nova chain, e tem o potencial de melhorar o desempenho dos seus subgraphs. Além disto, o Firehose — como um substituto pronto para a camada de extração JSON-RPC do `graph-node` — reduz em 90% o número de RPCs (chamadas de procedimento remoto) exigidas para indexação geral.
 
 - Todas essas chamadas `getLogs` e roundtrips são substituídas por um único fluxo que chega no coração do `graph-node`, um modelo de bloco único para todos os subgraphs que processa.
 
@@ -57,24 +59,12 @@ Configurar um Graph Node é tão fácil quanto preparar o seu ambiente local. Qu
 
 1. [Clone o Graph Node](https://github.com/graphprotocol/graph-node)
 
-2. Modifique [esta linha](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) para ela incluir o nome da nova rede e a URL do EVM JSON-RPC
+2. Modifique [esta linha](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) para incluir o nome da nova rede e a URL do JSON-RPC da EVM; ou uma compatível com o Firehose
 
  > Não mude o nome do env var. Ele deve permanecer como `ethereum` mesmo se o nome da rede for diferente.
 
 3. Execute um node IPFS ou use aquele usado pelo The Graph: https://api.thegraph.com/ipfs/
 
-### Como testar um JSON-RPC com a edição local de um subgraph
-
-1. Instale a [graph-cli](https://github.com/graphprotocol/graph-cli)
-2. Crie um subgraph de exemplo simples. Aqui estão algumas opções:
-   1. O contrato inteligente e o subgraph [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) pré-inclusos são bons pontos de partida
-   2. Inicie um subgraph local a partir de qualquer contrato inteligente existente ou de um ambiente de programação em solidity [com o uso do Hardhat com um plugin do Graph](https://github.com/graphprotocol/hardhat-graph)
-3. Adapte o `subgraph.yaml` resultante com a mudança do  `dataSources.network` para o mesmo nome passado anteriormente ao Graph Node.
-4. Crie o seu subgraph no Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT`
-5. Edite o seu subgraph no Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT`
-
-O Graph Node deve então sincronizar o subgraph lançado caso não haja erros. Deixe-o sincronizar por um tempo, e depois envie alguns queries em GraphQL ao endpoint da API produzido pelos logs.
-
 ## Subgraphs movidos por Substreams
 
-For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself.
+Para integrações do Substreams ou Firehose movidas ao StreamingFast, são inclusos: apoio básico a módulos do Substreams (por exemplo: transações, logs, e eventos de contrato inteligente decodificados); e ferramentas de geração de código do Substreams. Estas ferramentas permitem a habilidade de ativar [subgraphs movidos pelo Substreams](/substreams/sps/introduction/). Siga o [Passo-a-Passo](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) e execute `substreams codegen subgraph` para sentir um gostinho das ferramentas.
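Editor's aside: step 3 of the configuration offers two IPFS options, a local node or The Graph's hosted endpoint. A sketch of both follows; the kubo (`ipfs`) commands assume a default install, and `$SUBGRAPH_NAME`/`$GRAPH_NODE_ENDPOINT` are placeholders as in the deploy steps quoted in the diffs.

```bash
# Option A: run a local IPFS node (kubo) with default settings.
ipfs init
ipfs daemon

# Option B: point deployments at the IPFS endpoint The Graph operates,
# the URL given in the docs themselves.
graph deploy $SUBGRAPH_NAME --ipfs https://api.thegraph.com/ipfs/ --node $GRAPH_NODE_ENDPOINT
```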
From de38ee9c5ecc29896da3215f9852b367e6633d78 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:50:25 -0500
Subject: [PATCH 0365/1534] New translations new-chain-integration.mdx
 (Russian)
---
 .../ru/indexing/new-chain-integration.mdx     | 74 ++++++++-----------
 1 file changed, 32 insertions(+), 42 deletions(-)

diff --git a/website/src/pages/ru/indexing/new-chain-integration.mdx b/website/src/pages/ru/indexing/new-chain-integration.mdx
index f5c5cba520d5..427169610d41 100644
--- a/website/src/pages/ru/indexing/new-chain-integration.mdx
+++ b/website/src/pages/ru/indexing/new-chain-integration.mdx
@@ -1,80 +1,70 @@
 ---
-title: New Chain Integration
+title: Интеграция новых чейнов
 ---
 
-Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies:
+Чейны могут обеспечить поддержку субграфов в своей экосистеме, начав новую интеграцию `graph-node`. Субграфы — это мощный инструмент индексирования, открывающий перед разработчиками целый мир возможностей. Graph Node уже индексирует данные из перечисленных здесь чейнов. Если Вы заинтересованы в новой интеграции, для этого существуют 2 стратегии:
 
 1. **EVM JSON-RPC**
-2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms.
+2. **Firehose**: все решения по интеграции Firehose включают Substreams, крупномасштабный механизм потоковой передачи на базе Firehose со встроенной поддержкой `graph-node`, позволяющий выполнять распараллеленные преобразования.
 
-> Note that while the recommended approach is to develop a new Firehose for all new chains, it is only required for non-EVM chains.
+> Обратите внимание на то, что хотя рекомендуемый подход заключается в разработке нового Firehose для всех новых чейнов, он требуется только для чейнов, не поддерживающих EVM.
 
-## Integration Strategies
+## Интеграционные стратегии
 
 ### 1. EVM JSON-RPC
 
-If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain.
+Если блокчейн эквивалентен EVM и клиент/нода предоставляет стандартный API EVM JSON-RPC, Graph Node должен иметь возможность индексировать новый чейн.
 
-#### Testing an EVM JSON-RPC
+#### Тестирование EVM JSON-RPC
 
-For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON-RPC methods:
+Чтобы Graph Node мог принимать данные из чейна EVM, нода RPC должна предоставлять следующие методы EVM JSON-RPC:
 
 - `eth_getLogs`
-- `eth_call` (for historical blocks, with EIP-1898 - requires archive node)
+- `eth_call` (для исторических блоков, с EIP-1898 - требуется архивная нода)
 - `eth_getBlockByNumber`
 - `eth_getBlockByHash`
 - `net_version`
-- `eth_getTransactionReceipt`, in a JSON-RPC batch request
+- `eth_getTransactionReceipt` в пакетном запросе JSON-RPC
-- `trace_filter` *(optionally required for Graph Node to support call handlers)*
+- `trace_filter` _(ограниченное отслеживание, возможно, требуется для Graph Node)_
 
-### 2. Firehose Integration
+### 2. Интеграция Firehose
 
-[Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing.
+[Firehose](https://firehose.streamingfast.io/firehose-setup/overview) — это слой извлечения нового поколения. Он собирает историю в плоских файлах и передает ее в реальном времени. Технология Firehose заменяет эти вызовы API опросов потоком данных с использованием модели push, которая быстрее отправляет данные на ноду индексирования. Это помогает увеличить скорость синхронизации и индексирования.
 
-The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem.
+> ПРИМЕЧАНИЕ: Все интеграции, выполненные командой StreamingFast, включают обслуживание протокола репликации Firehose в кодовой базе чейна. StreamingFast отслеживает любые изменения и выпускает двоичные файлы, когда Вы изменяете код и когда StreamingFast изменяет код. Сюда входит выпуск двоичных файлов Firehose/Substreams для протокола, поддержка модулей Substreams для блочной модели чейна и выпуск двоичных файлов для ноды блокчейна с оснащением инструментами, если это необходимо.
 
-> NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be.
+#### Интеграция для чейнов, не поддерживающих EVM
 
-#### Specific Firehose Instrumentation for EVM (`geth`) chains
+Основной метод интеграции Firehose в чейны — использование стратегии опроса RPC. Наш алгоритм опроса предсказывает, когда поступит новый блок, и увеличивает скорость проверки наличия нового блока в ближайшее время, что делает его эффективным решением с очень низкой задержкой. За помощью по интеграции и обслуживанию Firehose обращайтесь к [команде StreamingFast] (https://www.streamingfast.io/firehose-integration-program). Новые чейны и их интеграторы по достоинству оценят [осведомленность о форках](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) и огромные возможности параллельного индексирования, которые Firehose и Substreams привносят в их экосистему.
 
-For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract.
+#### Специальная оборудование для чейнов EVM (`geth`)
 
-![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png)
+Для чейнов EVM существует более глубокий уровень данных, который может быть достигнут с помощью `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), сотрудничества между Go-Ethereum и StreamingFast, для создания высокопроизводительной и богатой системы отслеживания транзакций. Live Tracer — наиболее комплексное решение, позволяющее получать [Расширенные](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) детали блоков. Это позволяет использовать новые парадигмы индексирования, такие как сопоставление шаблонов событий на основе изменений состояния, вызовов, деревьев вызовов более высокого уровня или запуск событий на основе изменений фактических переменных в смарт-контракте.
 
-> NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up.
+![Базовый блок vs Расширенный блок](/img/extended-vs-base-substreams-blocks.png)
 
+> ПРИМЕЧАНИЕ: Для этого улучшения Firehose требуется, чтобы в чейнах использовался движок EVM `geth version 1.13.0` и выше.
-While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. +## Рекомендации по EVM — разница между JSON-RPC & Firehose -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. +Хотя как JSON-RPC, так и Firehose оба подходят для субграфов, Firehose всегда востребован разработчиками, желающими создавать с помощью [Substreams](https://substreams.streamingfast.io). Поддержка Substreams позволяет разработчикам создавать [субграфы на основе субпотоков](/subgraphs/cookbook/substreams-powered-subgraphs/) для нового чейна и потенциально может повысить производительность Ваших субграфов. Кроме того, Firehose — в качестве замены уровня извлечения JSON-RPC `graph-node` — сокращает на 90 % количество вызовов RPC, необходимых для общего индексирования. -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) +- Все эти вызовы `getLogs` и циклические передачи заменяются единым потоком, поступающим в сердце `graph-node`; единой блочной моделью для всех обрабатываемых ею субграфов. -## Graph Node Configuration +> ПРИМЕЧАНИЕ: Интеграция на основе Firehose для чейнов EVM по-прежнему будет требовать от Индексаторов запуска ноды архива RPC чейна для правильного индексирования субрафов. Это происходит из-за неспособности Firehose предоставить состояние смарт-контракта, обычно доступное с помощью метода RPC `eth_call`. (Стоит напомнить, что `eth_calls` не является хорошей практикой для разработчиков) -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. +## Конфигурация Graph Node -1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) +Настроить Graph Node так же просто, как подготовить локальную среду. После того, как Ваша локальная среда настроена, Вы можете протестировать интеграцию, локально развернув субграф. -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL +1. [Клонировать Graph Node](https://github.com/graphprotocol/graph-node) - > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. +2. Измените [эту строку](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22), чтобы включить новое имя сети и совместимый с EVM JSON-RPC или Firehose URL. -3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ + > Не меняйте само имя переменной env. Оно должно оставаться `ethereum`, даже если имя сети отличается. 
-### Testing an EVM JSON-RPC by locally deploying a subgraph +3. Запустите ноду IPFS или используйте ту, которая используется The Graph: https://api.thegraph.com/ipfs/ -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` +## Субграфы, работающие на основе субпотоков (Substreams) -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. - -## Substreams-powered Subgraphs - -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. +Для интеграции Firehose/Substreams под управлением StreamingFast включена базовая поддержка фундаментальных модулей Substreams (например, декодированные транзакции, логи и события смарт-контрактов) и инструментов генерации кодов Substreams. Эти инструменты позволяют включать [субграфы на базе субпотоков](/substreams/sps/introduction/). Следуйте [Практическому руководству] (https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) и запустите `substreams codegen subgraph`, чтобы самостоятельно испробовать инструменты кодирования. From 8d4510b477ee7775480fe338d481c8d86808f938 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:26 -0500 Subject: [PATCH 0366/1534] New translations new-chain-integration.mdx (Swedish) --- .../sv/indexing/new-chain-integration.mdx | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/website/src/pages/sv/indexing/new-chain-integration.mdx b/website/src/pages/sv/indexing/new-chain-integration.mdx index b45f9b924f50..5f1a82a49794 100644 --- a/website/src/pages/sv/indexing/new-chain-integration.mdx +++ b/website/src/pages/sv/indexing/new-chain-integration.mdx @@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, i en JSON-RPC batch-begäran -- `trace_filter` *(optionally required for Graph Node to support call handlers)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. 
Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. - > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. -#### Specific Firehose Instrumentation for EVM (`geth`) chains +#### Integration for Non-EVM chains + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +#### Specific Instrumentation for EVM (`geth`) chains For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. @@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your 1. [Klona Graf Node](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. 
Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -### Testing an EVM JSON-RPC by locally deploying a subgraph - -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Skapa en enkel exempelsubgraf. Några alternativ är nedan: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` - -Graf Node bör synkronisera den distribuerade subgrafen om det inte finns några fel. Ge det tid att synkronisera, och skicka sedan några GraphQL-begäranden till API-slutpunkten som skrivs ut i loggarna. - ## Substreams-powered Subgraphs For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 53c96f4c1a916c80b34df56cfb6d4ddec5b784b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:27 -0500 Subject: [PATCH 0367/1534] New translations new-chain-integration.mdx (Turkish) --- .../tr/indexing/new-chain-integration.mdx | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/website/src/pages/tr/indexing/new-chain-integration.mdx b/website/src/pages/tr/indexing/new-chain-integration.mdx index 3ee656eccc5d..e5a2124dc950 100644 --- a/website/src/pages/tr/indexing/new-chain-integration.mdx +++ b/website/src/pages/tr/indexing/new-chain-integration.mdx @@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, bir JSON-RPC toplu talebinde -- `trace_filter` *(optionally required for Graph Node to support call handlers)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. 
For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. - > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. -#### Specific Firehose Instrumentation for EVM (`geth`) chains +#### Integration for Non-EVM chains + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +#### Specific Instrumentation for EVM (`geth`) chains For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. @@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your 1. [Graph Düğümü'nü Klonlayın](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -### Testing an EVM JSON-RPC by locally deploying a subgraph - -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Basit bir örnek subgraph oluşturun. Bazı seçenekler aşağıdadır: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. 
Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` - -Herhangi bir hata olmadığı takdirde Graph Düğü'mü dağıtılan subgraph'ı senkronize ediyor olmalıdır. Senkronizasyon için zaman tanıyın, ardından kayıtlarla yazdırılan API uç noktasına bazı GraphQL sorguları gönderin. - ## Substreams-powered Subgraphs For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 3b506723776ea83907e8d90478d00b16df9cfac5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:28 -0500 Subject: [PATCH 0368/1534] New translations new-chain-integration.mdx (Ukrainian) --- .../uk/indexing/new-chain-integration.mdx | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/website/src/pages/uk/indexing/new-chain-integration.mdx b/website/src/pages/uk/indexing/new-chain-integration.mdx index f5c5cba520d5..7c9f8a5c65c4 100644 --- a/website/src/pages/uk/indexing/new-chain-integration.mdx +++ b/website/src/pages/uk/indexing/new-chain-integration.mdx @@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(optionally required for Graph Node to support call handlers)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. - > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. 
StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. -#### Specific Firehose Instrumentation for EVM (`geth`) chains +#### Integration for Non-EVM chains + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +#### Specific Instrumentation for EVM (`geth`) chains For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. @@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -### Testing an EVM JSON-RPC by locally deploying a subgraph - -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. 
Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` - -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. - ## Substreams-powered Subgraphs For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 3ab7d5a65c35d51a05bd2ff0e1a1b49fc7fdcbbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:29 -0500 Subject: [PATCH 0369/1534] New translations new-chain-integration.mdx (Chinese Simplified) --- .../zh/indexing/new-chain-integration.mdx | 34 +++++++------------ 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/website/src/pages/zh/indexing/new-chain-integration.mdx b/website/src/pages/zh/indexing/new-chain-integration.mdx index 4975c18f4e62..8c2ecddb63ef 100644 --- a/website/src/pages/zh/indexing/new-chain-integration.mdx +++ b/website/src/pages/zh/indexing/new-chain-integration.mdx @@ -15,7 +15,7 @@ Chains can bring subgraph support to their ecosystem by starting a new `graph-no If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. -#### 测试EVM JSON-RPC +#### Testing an EVM JSON-RPC For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON-RPC methods: @@ -24,18 +24,20 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByNumber` - `eth_getBlockByHash` - `net_version` -- `eth_getTransactionReceipt`, 在JSON-RPC批量请求中 -- `trace_filter` *(optionally required for Graph Node to support call handlers)* +- `eth_getTransactionReceipt`, in a JSON-RPC batch request +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. 
- > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. -#### Specific Firehose Instrumentation for EVM (`geth`) chains +#### Integration for Non-EVM chains + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +#### Specific Instrumentation for EVM (`geth`) chains For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. @@ -51,30 +53,18 @@ While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is a > NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) -## Graph Node配置 +## Graph节点配置 Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. -1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) +1. [克隆Graph节点](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -### Testing an EVM JSON-RPC by locally deploying a subgraph - -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. 创建一个简单的示例子图。以下是一些选项: - 1. 
The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` - -如果没有错误,Graph Node应该正在同步部署的子图。给它一些时间来同步,然后向API端点发送一些GraphQL查询。 - -## Substreams-powered Subgraphs +## Substreams驱动的子图 For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 33fb0b1728a3e25ab16812682ecabf7c480e459a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:30 -0500 Subject: [PATCH 0370/1534] New translations new-chain-integration.mdx (Urdu (Pakistan)) --- .../ur/indexing/new-chain-integration.mdx | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/website/src/pages/ur/indexing/new-chain-integration.mdx b/website/src/pages/ur/indexing/new-chain-integration.mdx index 53d24d0d1aff..c8b7aa53ea5d 100644 --- a/website/src/pages/ur/indexing/new-chain-integration.mdx +++ b/website/src/pages/ur/indexing/new-chain-integration.mdx @@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(optionally required for Graph Node to support call handlers)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. 
- > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. -#### Specific Firehose Instrumentation for EVM (`geth`) chains +#### Integration for Non-EVM chains + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +#### Specific Instrumentation for EVM (`geth`) chains For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. @@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your 1. [گراف نوڈ کی نقل بنائیں](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -### Testing an EVM JSON-RPC by locally deploying a subgraph - -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. ایک سادہ مثالی سب گراف بنائیں۔ کچھ آپشنز ذیل میں ہیں: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. 
Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` - -اگر کوئی خرابی نہیں ہے تو گراف نوڈ کو تعینات سب گراف کو ہم آہنگ کرنا چاہیے۔ اسے مطابقت پذیری کے لیے وقت دیں، پھر لاگز میں پرنٹ کردہ API اینڈ پوائنٹ پر کچھ GraphQL کیوریز بھیجیں۔ - ## Substreams-powered Subgraphs For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 919d5921961a2e2db1af1f9ea7c18016ffa54649 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:31 -0500 Subject: [PATCH 0371/1534] New translations new-chain-integration.mdx (Vietnamese) --- .../vi/indexing/new-chain-integration.mdx | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/website/src/pages/vi/indexing/new-chain-integration.mdx b/website/src/pages/vi/indexing/new-chain-integration.mdx index f5c5cba520d5..7c9f8a5c65c4 100644 --- a/website/src/pages/vi/indexing/new-chain-integration.mdx +++ b/website/src/pages/vi/indexing/new-chain-integration.mdx @@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(optionally required for Graph Node to support call handlers)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. - > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. 
-#### Specific Firehose Instrumentation for EVM (`geth`) chains +#### Integration for Non-EVM chains + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +#### Specific Instrumentation for EVM (`geth`) chains For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. @@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -### Testing an EVM JSON-RPC by locally deploying a subgraph - -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` - -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. - ## Substreams-powered Subgraphs For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. 
decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. From 1e58eac5a8e96e41bc70b9d1e32612f83714b5bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:32 -0500 Subject: [PATCH 0372/1534] New translations new-chain-integration.mdx (Marathi) --- .../mr/indexing/new-chain-integration.mdx | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/website/src/pages/mr/indexing/new-chain-integration.mdx b/website/src/pages/mr/indexing/new-chain-integration.mdx index f5c5cba520d5..7c9f8a5c65c4 100644 --- a/website/src/pages/mr/indexing/new-chain-integration.mdx +++ b/website/src/pages/mr/indexing/new-chain-integration.mdx @@ -25,17 +25,19 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- `trace_filter` *(optionally required for Graph Node to support call handlers)* +- `trace_filter` _(limited tracing and optionally required for Graph Node)_ ### 2. Firehose Integration [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. - > NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. -#### Specific Firehose Instrumentation for EVM (`geth`) chains +#### Integration for Non-EVM chains + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). 
New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +#### Specific Instrumentation for EVM (`geth`) chains For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. @@ -57,24 +59,12 @@ Configuring Graph Node is as easy as preparing your local environment. Once your 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -### Testing an EVM JSON-RPC by locally deploying a subgraph - -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` - -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. - ## Substreams-powered Subgraphs For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
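The `new-chain-integration.mdx` patches above all track the same English source, which requires an EVM RPC node to expose `eth_getBlockByNumber`, `eth_getBlockByHash`, `net_version`, `eth_getTransactionReceipt` (in a JSON-RPC batch request) and, optionally, `trace_filter`. A quick way to sanity-check a candidate endpoint before wiring it into Graph Node is to probe each method with `curl`. The sketch below is illustrative and not part of any patch; `RPC_URL` and the zeroed transaction hash are assumed placeholders to be filled in.

```bash
#!/usr/bin/env bash
# Probe the EVM JSON-RPC methods Graph Node relies on (sketch; RPC_URL is an assumed placeholder).
RPC_URL="${RPC_URL:-http://localhost:8545}"

rpc() {
  curl -s -X POST -H 'Content-Type: application/json' --data "$1" "$RPC_URL"
  echo
}

# Basic liveness and block access
rpc '{"jsonrpc":"2.0","id":1,"method":"net_version","params":[]}'
rpc '{"jsonrpc":"2.0","id":2,"method":"eth_getBlockByNumber","params":["latest",false]}'

# eth_getTransactionReceipt must also work inside a JSON-RPC *batch* (an array of calls);
# replace the zeroed hash with a real transaction hash from the chain under test.
rpc '[{"jsonrpc":"2.0","id":3,"method":"eth_getTransactionReceipt","params":["0x0000000000000000000000000000000000000000000000000000000000000000"]}]'

# trace_filter (Erigon/OpenEthereum-style tracing; optional, used for call handlers)
rpc '{"jsonrpc":"2.0","id":4,"method":"trace_filter","params":[{"fromBlock":"0x1","toBlock":"0x2"}]}'
```

The same patches delete the English "Testing an EVM JSON-RPC by locally deploying a subgraph" walkthrough. Its command sequence, taken verbatim from the removed lines (with `$SUBGRAPH_NAME`, `$GRAPH_NODE_ENDPOINT` and `$IPFS_ENDPOINT` left as the placeholders the source uses), boils down to:

```bash
# After setting dataSources.network in subgraph.yaml to the network name given to Graph Node:
graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT
graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT
```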
From f62f6855d763868dcf62e0a8a6a72ddc05d9083a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:33 -0500 Subject: [PATCH 0373/1534] New translations new-chain-integration.mdx (Hindi) --- .../hi/indexing/new-chain-integration.mdx | 30 +++++++------------ 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/website/src/pages/hi/indexing/new-chain-integration.mdx b/website/src/pages/hi/indexing/new-chain-integration.mdx index 5e2c9db1a96b..e5730b5c07ed 100644 --- a/website/src/pages/hi/indexing/new-chain-integration.mdx +++ b/website/src/pages/hi/indexing/new-chain-integration.mdx @@ -25,17 +25,19 @@ Graph Node को EVM चेन से डेटा इन्गेस्ट क - `eth_getBlockByHash` - `net_version` - `eth_getTransactionReceipt`, in a JSON-RPC batch request -- trace_filter (वैकल्पिक रूप से Graph Node को कॉल हैंडलर्स का समर्थन करने के लिए आवश्यक) +- `trace_filter` _(सीमित ट्रेसिंग और विकल्पतः Graph Node के लिए आवश्यक)_ ### 2. Firehose एकीकरण [Firehose](https://firehose.streamingfast.io/firehose-setup/overview) एक अगली पीढ़ी की निष्कर्षण परत है। यह फ्लैट फ़ाइलों में इतिहास एकत्र करता है और वास्तविक समय में स्ट्रीम करता है। Firehose तकनीक उन पॉलिंग API कॉल्स को एक डेटा स्ट्रीम से बदल देती है, जो एक पुश मॉडल का उपयोग करती है, जिससे डेटा को इंडेक्सिंग नोड तक तेजी से भेजा जा सके। यह सिंकिंग और इंडेक्सिंग की गति बढ़ाने में मदद करता है। -फायरहोज़ को चेन में एकीकृत करने का प्राथमिक तरीका RPC पॉलिंग रणनीति का उपयोग करना है। हमारी पॉलिंग एल्गोरिदम नए ब्लॉक के आने का पूर्वानुमान लगाएगी और उस समय के करीब नए ब्लॉक के लिए जाँच करने की दर बढ़ा देगी, जिससे यह एक बहुत कम लेटेंसी और प्रभावी समाधान बन जाता है। फायरहोज़ के एकीकरण और रखरखाव में मदद के लिए, [स्ट्रीमिंगफास्ट टीम](https://www.streamingfast.io/firehose-integration-program) से संपर्क करें। नए चेन और उनके एकीकृतकर्ताओं को फायरहोज़ और सबस्ट्रीम द्वारा उनके पारिस्थितिकी तंत्र में लाए गए [फोर्क जागरूकता](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) और विशाल समानांतर इंडेक्सिंग क्षमताओं की सराहना होगी। - > नोट: StreamingFast टीम द्वारा की गई सभी एकीकरणों में श्रृंखला के कोडबेस में Firehose प्रतिकृति प्रोटोकॉल के लिए रखरखाव शामिल है।StreamingFast किसी भी परिवर्तन को ट्रैक करता है और जब आप कोड बदलते हैं और जब StreamingFastकोड बदलता है, तो बाइनरी जारी करता है। इसमें प्रोटोकॉल के लिए Firehose/Substreamsबाइनरी जारी करना, श्रृंखला के ब्लॉक मॉडल के लिए Substreamsमॉड्यूल को बनाए रखना, और आवश्यकता होने पर ब्लॉकचेन नोड के लिए इंस्ट्रुमेंटेशन के साथ बाइनरी जारी करना शामिल है। -#### ईवीएम (`geth`) चेन के लिए विशिष्ट फायरहोज़ इंस्ट्रूमेंटेशन +#### Integration for Non-EVM chains + +फायरहोज़ को चेन में एकीकृत करने का प्राथमिक तरीका RPC पॉलिंग रणनीति का उपयोग करना है। हमारी पॉलिंग एल्गोरिदम नए ब्लॉक के आने का पूर्वानुमान लगाएगी और उस समय के करीब नए ब्लॉक के लिए जाँच करने की दर बढ़ा देगी, जिससे यह एक बहुत कम लेटेंसी और प्रभावी समाधान बन जाता है। फायरहोज़ के एकीकरण और रखरखाव में मदद के लिए, [स्ट्रीमिंगफास्ट टीम](https://www.streamingfast.io/firehose-integration-program) से संपर्क करें। नए चेन और उनके एकीकृतकर्ताओं को फायरहोज़ और सबस्ट्रीम द्वारा उनके पारिस्थितिकी तंत्र में लाए गए [फोर्क जागरूकता](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) और विशाल समानांतर इंडेक्सिंग क्षमताओं की सराहना होगी। + +#### Specific Instrumentation for EVM (`geth`) chains EVM चेन के लिए, एक गहरे स्तर के डेटा को प्राप्त करने के लिए `geth` [लाइव-ट्रेसर](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0) का उपयोग किया जाता है, जो गो-एथेरियम और स्ट्रीमिंगफास्ट के बीच सहयोग है, जो उच्च 
थ्रूपुट और समृद्ध लेनदेन ट्रेसिंग प्रणाली बनाने के लिए है। लाइव ट्रेसर सबसे व्यापक समाधान है, जो [विस्तारित](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) ब्लॉक विवरण का परिणाम है। यह नए इंडेक्सिंग पैरेडाइम्स की अनुमति देता है, जैसे राज्य परिवर्तनों, कॉल्स, पैरेंट कॉल ट्रीज़ के आधार पर घटनाओं का पैटर्न मिलाना, या स्मार्ट कॉन्ट्रैक्ट में वास्तविक वेरिएबल्स में बदलाव के आधार पर घटनाओं को ट्रिगर करना। @@ -45,11 +47,11 @@ EVM चेन के लिए, एक गहरे स्तर के डे ## EVM विचार - JSON-RPC और Firehose के बीच का अंतर -जबकि JSON-RPC और Firehose दोनों सबग्राफ़ के लिए उपयुक्त हैं, Firehose हमेशा उन डेवलपर्स के लिए आवश्यक है जो [Substreams](https://substreams.streamingfast.io) के साथ निर्माण करना चाहते हैं। Substreams का समर्थन करने से डेवलपर्स को नए चेन के लिए [Substreams-समर्थित सबग्राफ़](/subgraphs/cookbook/substreams-powered-subgraphs/) बनाने की अनुमति मिलती है, और इससे आपके सबग्राफ़ के प्रदर्शन में सुधार करने की क्षमता होती है। इसके अतिरिक्त, Firehose — `graph-node` के JSON-RPC निष्कर्षण स्तर के लिए एक ड्रॉप-इन प्रतिस्थापन के रूप में — सामान्य अनुक्रमण के लिए आवश्यक RPC कॉल की संख्या को 90% तक कम करता है। +JSON-RPC और Firehose दोनों ही सबग्राफ के लिए उपयुक्त हैं, लेकिन एक Firehose हमेशा आवश्यक होता है यदि डेवलपर्स [सबस्ट्रीम](https://substreams.streamingfast.io) के साथ निर्माण करना चाहते हैं। सबस्ट्रीम का समर्थन करने से डेवलपर्स को नए chain के लिए [सबस्ट्रीम-powered सबग्राफ](/subgraphs/cookbook/substreams-powered-subgraphs/) बनाने की अनुमति मिलती है, और इसके परिणामस्वरूप आपके सबग्राफ की प्रदर्शन क्षमता में सुधार हो सकता है। इसके अतिरिक्त, Firehose — जो कि `ग्राफ-नोड` के JSON-RPC extraction layer का एक drop-in replacement है — सामान्य indexing के लिए आवश्यक RPC कॉल्स की संख्या को 90% तक घटा देता है। - सभी `getLogs` कॉल्स और राउंडट्रिप्स को एकल स्ट्रीम द्वारा प्रतिस्थापित किया जाता है, जो सीधे `graph-node` के केंद्र में पहुंचती है; यह एकल ब्लॉक मॉडल सभी सबग्राफ्स के लिए काम करता है जिन्हें यह प्रोसेस करता है। -> **NOTE**: EVM chains के लिए Firehose-based integration के लिए अभी भी Indexers को chain के संग्रह RPC node को subgraph को ठीक से index करने के लिए चलाने की आवश्यकता होगी। यह `eth_call` RPC विधि द्वारा आम तौर पर पहुंच योग्य smart contract स्थिति प्रदान करने में Firehosesकी असमर्थता के कारण है। (It's worth reminding that eth_calls are [not a good practice for developers](/)/) +> **NOTE**: EVM chains के लिए Firehose-based integration के लिए अभी भी Indexers को chain के संग्रह RPC node को subgraph को ठीक से index करने के लिए चलाने की आवश्यकता होगी। यह `eth_call` RPC विधि द्वारा आम तौर पर पहुंच योग्य smart contract स्थिति प्रदान करने में Firehosesकी असमर्थता के कारण है। (It's worth reminding that eth_calls are [not a good practice for developers](/)) ## Graph Node Configuration @@ -57,24 +59,12 @@ EVM चेन के लिए, एक गहरे स्तर के डे 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) -2. नए network नाम और EVM JSON RPC अनुरूप URL को शामिल करने के लिए [this line] (https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) को संशोधित करें +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC or Firehose compliant URL > कृपया पर्यावरण चर ethereum को खुद नाम में बदलें नहीं। यही रहना चाहिए, चाहे network का नाम भिन्न हो। 3. 
एक IPFS node चलाएं या उसे The Graph द्वारा उपयोग किया जाने वाले node का उपयोग करें: https://api.thegraph.com/ipfs/ -### EVM JSON-RPC का परीक्षण करने के लिए स्थानीय रूप से एक Subgraph तैनात करना - -1. [graph-cli] स्थापित करें(https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. Pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract और subgraph एक अच्छा शुरुआती बिंदु है - 2. किसी भी मौजूदा smart contract या solidity dev वातावरण से एक local subgraph को Bootstrap करें [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. परिणामी `subgraph.yaml` को [`dataSources.network`](http://dataSources.network) को उसी नाम में बदलकर अनुकूलित करें जो पहले Graph Node पर दिया गया था। -4. अपने subgraph को Graph Node में बनाएँ: graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT -5. अपने 'subgraph' को Graph Node पर प्रकाशित करें: graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT - -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. - ## सबस्ट्रीम-संचालित सबग्राफ की सेवा -StreamingFast-के नेतृत्व वाले Firehose/Substreams इंटीग्रेशन के लिए, मूलभूत Substreams मॉड्यूल (जैसे कि डिकोडेड लेनदेन, लॉग और स्मार्ट-कॉन्ट्रैक्ट इवेंट) और Substreams कोडजन टूल्स के लिए बुनियादी समर्थन शामिल है। ये टूल [Substreams-समर्थित सबग्राफ़](https://sps.introduction) सक्षम करने की क्षमता प्रदान करते हैं। [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) का पालन करें और `substreams codegen subgraph` चलाएं ताकि आप कोडजन टूल्स का अनुभव कर सकें। +StreamingFast द्वारा संचालित Firehose/सबस्ट्रीम इंटीग्रेशन के लिए, बुनियादी सबस्ट्रीम मॉड्यूल (जैसे डिकोड किए गए लेनदेन, log और स्मार्ट-contract आयोजन) और सबस्ट्रीम कोडजेन टूल्स का बेसिक सपोर्ट शामिल है। ये टूल्स [सबस्ट्रीम-powered सबग्राफ](/substreams/sps/introduction/) को सक्षम बनाने की क्षमता प्रदान करते हैं। [ मार्गदर्शक](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) का अनुसरण करें और `सबस्ट्रीम codegen सबग्राफ` चलाकर कोडजेन टूल्स का अनुभव लें। From 40d7227b50a371f65d876169ef063a5b35f8fec8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:34 -0500 Subject: [PATCH 0374/1534] New translations supported-network-requirements.mdx (Romanian) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ro/indexing/supported-network-requirements.mdx b/website/src/pages/ro/indexing/supported-network-requirements.mdx index df15ef48d762..afbf755c0a5a 100644 --- a/website/src/pages/ro/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ro/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preffered)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From 124052bcffaff00a64e97bb3c6898ae253f6aadc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:35 -0500 Subject: [PATCH 0375/1534] New translations supported-network-requirements.mdx (French) --- .../supported-network-requirements.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/fr/indexing/supported-network-requirements.mdx b/website/src/pages/fr/indexing/supported-network-requirements.mdx index 40d53ab2c69b..e22b3896b253 100644 --- a/website/src/pages/fr/indexing/supported-network-requirements.mdx +++ b/website/src/pages/fr/indexing/supported-network-requirements.mdx @@ -1,18 +1,18 @@ --- -title: Supported Network Requirements +title: Exigences du réseau pris en charge --- -| Réseau | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Réseau | Guides | Configuration requise | Récompenses d'indexation | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------: | +| Arbitrum | [Guide Baremetal ](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Guide Docker ](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | CPU 4+ coeurs
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _dernière mise à jour août 2023_ | ✅ | +| Avalanche | [Guide Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | CPU 4 cœurs / 8 threads
    Ubuntu 22.04
    16Go+ RAM
    >= 5 Tio NVMe SSD
    _dernière mise à jour août 2023_ | ✅ | +| Base | [Guide Erigon Baremetal ](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [Guide GETH Baremetal ](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [Guide GETH Docker ](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | CPU 8+ cœurs
    Debian 12/Ubuntu 22.04
    16 Go RAM
    >= 4.5To (NVME recommandé)
    _Dernière mise à jour le 14 mai 2024_ | ✅ | +| Binance | [Guide Erigon Baremetal ](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | CPU 8 cœurs / 16 threads
    Ubuntu 22.04
    >=32 Go RAM
    >= 14 Tio NVMe SSD
    _Dernière mise à jour le 22 juin 2024_ | ✅ | +| Celo | [Guide Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | CPU 4 cœurs / 8 threads
    Ubuntu 22.04
    16Go+ RAM
    >= 2 Tio NVMe SSD
    _Dernière mise à jour en août 2023_ | ✅ | +| Ethereum | [Guide Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Vitesse d'horloge supérieure par rapport au nombre de cœurs
    Ubuntu 22.04
    16 Go+ RAM
    >=3 To (NVMe recommandé)
    _dernière mise à jour août 2023_ | ✅ | +| Fantom | [Guide Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | CPU 4 cœurs / 8 threads
    Ubuntu 22.04
    16 Go + RAM
    >= 13 Tio SSD NVMe
    _dernière mise à jour août 2023_ | ✅ | +| Gnosis | [Guide Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | CPU 6 cœurs / 12 threads
    Ubuntu 22.04
    16 Go+ RAM
    >= 3 To SSD NVMe
    _dernière mise à jour août 2023_ | ✅ | +| Linea | [Guide Baremetal ](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | CPU 4+ cœurs
    Ubuntu 22.04
    16 Go+ RAM
    >= 1 To SSD NVMe
    _dernière mise à jour le 2 avril 2024_ | ✅ | +| Optimism | [Guide Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [Guide GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [Guide GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | CPU 4 cœurs / 8 threads
    Ubuntu 22.04
    16 Go + RAM
    >= 8 Tio SSD NVMe
    _dernière mise à jour août 2023_ | ✅ | +| Polygon | [Guide Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | CPU 16 cœurs
    Ubuntu 22.04
    32 Go+ RAM
    >= 10 Tio NVMe SSD
    _dernière mise à jour août 2023_ | ✅ | +| Scroll | [Guide Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Guide Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | CPU 4 cœurs / 8 threads
    Debian 12
    16 Go + RAM
    >= 1 Tio NVMe SSD
    _dernière mise à jour le 3 avril 2024_ | ✅ | From 53e705750b58cf691198f0de68089a9cfb877132 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:36 -0500 Subject: [PATCH 0376/1534] New translations supported-network-requirements.mdx (Spanish) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/es/indexing/supported-network-requirements.mdx b/website/src/pages/es/indexing/supported-network-requirements.mdx index dfebec344880..c06461d981d6 100644 --- a/website/src/pages/es/indexing/supported-network-requirements.mdx +++ b/website/src/pages/es/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Red | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Red | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From 523813e42e588c3e26c7bbd3e4eec0af25e1c013 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:37 -0500 Subject: [PATCH 0377/1534] New translations supported-network-requirements.mdx (Arabic) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ar/indexing/supported-network-requirements.mdx b/website/src/pages/ar/indexing/supported-network-requirements.mdx index 9c820d055399..811fb2a8cec7 100644 --- a/website/src/pages/ar/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ar/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| بوليجون | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| بوليجون | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From e9d31fb3adce6603da3469c5e37ff424f2e5d06e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:38 -0500 Subject: [PATCH 0378/1534] New translations supported-network-requirements.mdx (Czech) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/cs/indexing/supported-network-requirements.mdx b/website/src/pages/cs/indexing/supported-network-requirements.mdx index a81118cec231..efbee1c17750 100644 --- a/website/src/pages/cs/indexing/supported-network-requirements.mdx +++ b/website/src/pages/cs/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Síť | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Síť | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From e0db590f4ecd6123cf5a73eeed87e62769ad5171 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:39 -0500 Subject: [PATCH 0379/1534] New translations supported-network-requirements.mdx (German) --- .../supported-network-requirements.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/de/indexing/supported-network-requirements.mdx b/website/src/pages/de/indexing/supported-network-requirements.mdx index df15ef48d762..7bbfce189885 100644 --- a/website/src/pages/de/indexing/supported-network-requirements.mdx +++ b/website/src/pages/de/indexing/supported-network-requirements.mdx @@ -1,18 +1,18 @@ --- -title: Supported Network Requirements +title: Unterstützte Netzwerkanforderungen --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Netzwerk | Guides | Systemanforderungen | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Höhere Taktfrequenz im Vergleich zur Kernanzahl
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From 28ad931bf718246d0efed5530037bc6895ae5a6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:41 -0500 Subject: [PATCH 0380/1534] New translations supported-network-requirements.mdx (Italian) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/it/indexing/supported-network-requirements.mdx b/website/src/pages/it/indexing/supported-network-requirements.mdx index 7eed955d1013..88be77e74cc8 100644 --- a/website/src/pages/it/indexing/supported-network-requirements.mdx +++ b/website/src/pages/it/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| La rete | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| La rete | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From 9a2ac0ee2c93dedf3880a0a661fd38c4c8d58d1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:42 -0500 Subject: [PATCH 0381/1534] New translations supported-network-requirements.mdx (Japanese) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ja/indexing/supported-network-requirements.mdx b/website/src/pages/ja/indexing/supported-network-requirements.mdx index 6aa0c0caa16f..99ceda419e06 100644 --- a/website/src/pages/ja/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ja/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| ネットワーク | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| ネットワーク | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVME preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From fc1f885621e4cb494e805052be545e4e7d115361 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:42 -0500 Subject: [PATCH 0382/1534] New translations supported-network-requirements.mdx (Korean) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ko/indexing/supported-network-requirements.mdx b/website/src/pages/ko/indexing/supported-network-requirements.mdx index df15ef48d762..afbf755c0a5a 100644 --- a/website/src/pages/ko/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ko/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From b432d2cb1c0afe75213f22b4ec7911743b6ae7e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:43 -0500 Subject: [PATCH 0383/1534] New translations supported-network-requirements.mdx (Dutch) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/nl/indexing/supported-network-requirements.mdx b/website/src/pages/nl/indexing/supported-network-requirements.mdx index 9bfbc8d0fefd..4a5d2fa8f364 100644 --- a/website/src/pages/nl/indexing/supported-network-requirements.mdx +++ b/website/src/pages/nl/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Netwerk | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Netwerk | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From cae90c21668e12bb7a9c3880ec5adee61de1a3b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:44 -0500 Subject: [PATCH 0384/1534] New translations supported-network-requirements.mdx (Polish) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/pl/indexing/supported-network-requirements.mdx b/website/src/pages/pl/indexing/supported-network-requirements.mdx index df15ef48d762..afbf755c0a5a 100644 --- a/website/src/pages/pl/indexing/supported-network-requirements.mdx +++ b/website/src/pages/pl/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From dccabe9db27bfeaf41282d5de2a9cde8bd4e6283 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:45 -0500 Subject: [PATCH 0385/1534] New translations supported-network-requirements.mdx (Portuguese) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/pt/indexing/supported-network-requirements.mdx b/website/src/pages/pt/indexing/supported-network-requirements.mdx index d678f0534f01..c1bd4433f1d7 100644 --- a/website/src/pages/pt/indexing/supported-network-requirements.mdx +++ b/website/src/pages/pt/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Requisitos de Redes Apoiadas --- -| Rede | Guias | Requisitos de sistema | Recompensas de Indexação | -| --- | --- | --- | :-: | -| Arbitrum | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Guia Docker](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | CPU de 4+ núcleos
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 8 TiB
    _última atualização em agosto de 2023_ | ✅ | -| Avalanche | [Guia Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | CPU de 4 núcleos e 8 threads
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 5 TiB
    _última atualização em agosto de 2023_ | ✅ | -| Base | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [Guia GETH Baremetal](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [Guia GETH Docker](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | CPU de 8+ núcleos
    Debian 12/Ubuntu 22.04
    16 GB RAM
    mais de 4.5TB (NVMe preferido)
    _última atualização em 14 de maio de 2024_ | ✅ | -| Binance | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | CPU de 8 núcleos e 16 threads
    Ubuntu 22.04
    32GB+ RAM
    NVMe SSD com mais de 14 TiB
    _última atualização em 22 de junho de 2024_ | ✅ | -| Celo | [Guia Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | CPU de 4 núcleos e 8 threads
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 2 TiB
    _última atualização em agosto de 2023_ | ✅ | -| Ethereum | [Guia Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Frequência de clock maior que número de núcleos
    Ubuntu 22.04
    16GB+ RAM
    Mais de 3TB (NVMe recomendado)
    _última atualização em agosto de 2023_ | ✅ | -| Fantom | [Guia Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | CPU de 4 núcleos e 8 threads
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 13 TiB
    _última atualização em agosto de 2023_ | ✅ | -| Gnosis | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | CPU de 6 núcleos e 12 threads
    Ubuntu 22.04
    16GB+ RAM
    NVMe SSD com mais de 3 TiB
    _última atualização em agosto de 2023_ | ✅ | -| Linea | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | CPU de 4+ núcleos
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 1 TiB
    _última atualização em 2 de abril de 2024_ | ✅ | -| Optimism | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [Guia GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [Guia GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | CPU de 4 núcleos e 8 threads
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 8 TiB
    _última atualização em agosto de 2023_ | ✅ | -| Polygon | [Guia Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | CPU de 16 núcleos
    Ubuntu 22.04
    32GB+ RAM
    SSD NVMe com mais de 10 TiB
    _última atualização em agosto de 2023_ | ✅ | -| Scroll | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Guia Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | CPU de 4 núcleos e 8 threads
    Debian 12
    16GB+ RAM
    SSD NVMe com mais de 1 TiB
    _última atualização em 3 de abril de 2024_ | ✅ | +| Rede | Guias | Requisitos de sistema | Recompensas de Indexação | +| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------: | +| Arbitrum | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Guia Docker](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | CPU de 4+ núcleos
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 8 TiB
    _última atualização em agosto de 2023_ | ✅ | +| Avalanche | [Guia Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | CPU de 4 núcleos e 8 threads
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 5 TiB
    _última atualização em agosto de 2023_ | ✅ | +| Base | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [Guia GETH Baremetal](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [Guia GETH Docker](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | CPU de 8+ núcleos
    Debian 12/Ubuntu 22.04
    16 GB RAM
    mais de 4.5TB (NVMe preferido)
    _última atualização em 14 de maio de 2024_ | ✅ | +| Binance | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | CPU de 8 núcleos e 16 threads
    Ubuntu 22.04
    32GB+ RAM
    NVMe SSD com mais de 14 TiB
    _última atualização em 22 de junho de 2024_ | ✅ | +| Celo | [Guia Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | CPU de 4 núcleos e 8 threads
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 2 TiB
    _última atualização em agosto de 2023_ | ✅ | +| Ethereum | [Guia Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Frequência de clock maior que número de núcleos
    Ubuntu 22.04
    16GB+ RAM
    Mais de 3TB (NVMe recomendado)
    _última atualização em agosto de 2023_ | ✅ | +| Fantom | [Guia Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | CPU de 4 núcleos e 8 threads
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 13 TiB
    _última atualização em agosto de 2023_ | ✅ | +| Gnosis | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | CPU de 6 núcleos e 12 threads
    Ubuntu 22.04
    16GB+ RAM
    NVMe SSD com mais de 3 TiB
    _última atualização em agosto de 2023_ | ✅ | +| Linea | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | CPU de 4+ núcleos
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 1 TiB
    _última atualização em 2 de abril de 2024_ | ✅ | +| Optimism | [Guia Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [Guia GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [Guia GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | CPU de 4 núcleos e 8 threads
    Ubuntu 22.04
    16GB+ RAM
    SSD NVMe com mais de 8 TiB
    _última atualização em agosto de 2023_ | ✅ | +| Polygon | [Guia Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | CPU de 16 núcleos
    Ubuntu 22.04
    32GB+ RAM
    SSD NVMe com mais de 10 TiB
    _última atualização em agosto de 2023_ | ✅ | +| Scroll | [Guia Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Guia Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | CPU de 4 núcleos e 8 threads
    Debian 12
    16GB+ RAM
    SSD NVMe com mais de 1 TiB
    _última atualização em 3 de abril de 2024_ | ✅ | From 070b918217fe5c551c2d6299cf25d5731826eab6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:46 -0500 Subject: [PATCH 0386/1534] New translations supported-network-requirements.mdx (Russian) --- .../supported-network-requirements.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ru/indexing/supported-network-requirements.mdx b/website/src/pages/ru/indexing/supported-network-requirements.mdx index abc7b820fde7..719759b1c8f1 100644 --- a/website/src/pages/ru/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ru/indexing/supported-network-requirements.mdx @@ -1,18 +1,18 @@ --- -title: Supported Network Requirements +title: Требования к поддерживаемым сетям --- -| Сеть | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Арбитрум | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Сеть | Гайды | Системные требования | Награды за индексирование | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-----------------------: | +| Арбитрум | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ ядра CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _последнее обновление в августе 2023_ | ✅ | +| Avalanche | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 ядра / 8 потоков CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _последнее обновление в августе 2023_ | ✅ | +| Base | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [Гайд по GETH Baremetal](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [Гайд по GETH Docker](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ ядер CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _последнее обновление 14 мая 2024_ | ✅ | +| Binance | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 ядер / 16 потоков CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _последнее обновление 22 июня 2024_ | ✅ | +| Celo | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _последнее обновление в августе 2023_ | ✅ | +| Ethereum | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Более высокая тактовая частота по сравнению с количеством ядер
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _последнее обновление в августе 2023_ | ✅ | +| Fantom | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 ядра / 8 потоков CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _последнее обновление в августе 2023_ | ✅ | +| Gnosis | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 ядер / 12 потоков CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _последнее обновление в августе 2023_ | ✅ | +| Linea | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ ядра CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _последнее обновление 2 апреля 2024_ | ✅ | +| Optimism | [Гайд по Erigon Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [Гайд по GETH Baremetal](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [Гайд по GETH Docker](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 ядра / 8 потоков CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _последнее обновление в августе 2023_ | ✅ | +| Polygon | [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 ядер CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _последнее обновление в августе 2023_ | ✅ | +| Scroll | [Гайд по Baremetal](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Гайд по Docker](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 ядра / 8 потоков CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _последнее обновление 3 апреля 2024_ | ✅ | From a54f0b82a3608d00757fc12459aad7322c4628b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:47 -0500 Subject: [PATCH 0387/1534] New translations supported-network-requirements.mdx (Swedish) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/sv/indexing/supported-network-requirements.mdx b/website/src/pages/sv/indexing/supported-network-requirements.mdx index f7a4943afd1b..f6c91108bac9 100644 --- a/website/src/pages/sv/indexing/supported-network-requirements.mdx +++ b/website/src/pages/sv/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Nätverk | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Nätverk | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From 64a88b33d8a5ff273e829ab4fb1fe792deb772ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:48 -0500 Subject: [PATCH 0388/1534] New translations supported-network-requirements.mdx (Turkish) --- .../supported-network-requirements.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/tr/indexing/supported-network-requirements.mdx b/website/src/pages/tr/indexing/supported-network-requirements.mdx index 5329c6b9dad2..85eaad3b00c4 100644 --- a/website/src/pages/tr/indexing/supported-network-requirements.mdx +++ b/website/src/pages/tr/indexing/supported-network-requirements.mdx @@ -1,18 +1,18 @@ --- -title: Supported Network Requirements +title: Desteklenen Ağ Gereksinimleri --- -| Ağ | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Ağ | Rehberler | Sistem Gereksinimleri | Endeksleme Ödülleri | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | :-----------------: | +| Arbitrum | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ çekirdekli CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _son güncelleme Ağustos 2023_ | ✅ | +| Avalanche | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _son güncelleme Ağustos 2023_ | ✅ | +| Base | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Rehberi](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ çekirdekli CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe tercih edilir)
    _son güncelleme 14 Mayıs 2024_ | ✅ | +| Binance | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 çekirdekli / 16 iş parçacıklı CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _son güncelleme 22 Haziran 2024_ | ✅ | +| Celo | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _son güncelleme Ağustos 2023_ | ✅ | +| Ethereum | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Yüksek saat hızı, çekirdek sayısından daha önemlidir
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe önerilir)
    _son güncelleme Ağustos 2023_ | ✅ | +| Fantom | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _son güncelleme Ağustos 2023_ | ✅ | +| Gnosis | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 çekirdekli / 12 iş parçacıklı CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _son güncelleme Ağustos 2023_ | ✅ | +| Linea | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ çekirdekli CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _son güncelleme 2 Nisan 2024_ | ✅ | +| Optimism | [Erigon Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Rehberi](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _son güncelleme Ağustos 2023_ | ✅ | +| Polygon | [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 çekirdekli CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _son güncelleme Ağustos 2023_ | ✅ | +| Scroll | [Baremetal Rehberi](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Rehberi](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 çekirdekli / 8 iş parçacıklı CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _son güncelleme 3 Nisan 2024_ | ✅ | From c300bf1260c0dab9bc039940b6f765fbac49dde8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:49 -0500 Subject: [PATCH 0389/1534] New translations supported-network-requirements.mdx (Ukrainian) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/uk/indexing/supported-network-requirements.mdx b/website/src/pages/uk/indexing/supported-network-requirements.mdx index df15ef48d762..afbf755c0a5a 100644 --- a/website/src/pages/uk/indexing/supported-network-requirements.mdx +++ b/website/src/pages/uk/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From e817d51bf35272a9fbf5ff06cd8f8172c9ab7a1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:50 -0500 Subject: [PATCH 0390/1534] New translations supported-network-requirements.mdx (Chinese Simplified) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/zh/indexing/supported-network-requirements.mdx b/website/src/pages/zh/indexing/supported-network-requirements.mdx index 62bb55c63746..72c9bba12a39 100644 --- a/website/src/pages/zh/indexing/supported-network-requirements.mdx +++ b/website/src/pages/zh/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| 网络 | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| 以太坊 | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| 以太坊 | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From b380a780846cb52149b90d8e356a4dd0da070339 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:51 -0500 Subject: [PATCH 0391/1534] New translations supported-network-requirements.mdx (Urdu (Pakistan)) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ur/indexing/supported-network-requirements.mdx b/website/src/pages/ur/indexing/supported-network-requirements.mdx index f4b5a7768f13..ee345dee7c3f 100644 --- a/website/src/pages/ur/indexing/supported-network-requirements.mdx +++ b/website/src/pages/ur/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| نیٹ ورک | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| نیٹ ورک | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From d5334787278f9a30a74da26e46c00a0df2fb20f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:52 -0500 Subject: [PATCH 0392/1534] New translations supported-network-requirements.mdx (Vietnamese) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/vi/indexing/supported-network-requirements.mdx b/website/src/pages/vi/indexing/supported-network-requirements.mdx index 50cd5e88b459..a8305e895706 100644 --- a/website/src/pages/vi/indexing/supported-network-requirements.mdx +++ b/website/src/pages/vi/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Mạng lưới | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Mạng lưới | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From b5dfa98da4cc25afb60c1f02b85bf453ec07fd7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:53 -0500 Subject: [PATCH 0393/1534] New translations supported-network-requirements.mdx (Marathi) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/mr/indexing/supported-network-requirements.mdx b/website/src/pages/mr/indexing/supported-network-requirements.mdx index a1a9e0338649..8d20c31f3fc5 100644 --- a/website/src/pages/mr/indexing/supported-network-requirements.mdx +++ b/website/src/pages/mr/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| Network | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| हिमस्खलन | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| इथरियम | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| फॅन्टम | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| आशावाद | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| बहुभुज | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| Network | Guides | System Requirements | Indexing Rewards | +| -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| हिमस्खलन | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _last updated 22nd June 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| इथरियम | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| फॅन्टम | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| आशावाद | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| बहुभुज | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From cdf553e416ce3ecda58979d9dd64bd7a0ac137e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:54 -0500 Subject: [PATCH 0394/1534] New translations supported-network-requirements.mdx (Hindi) --- .../supported-network-requirements.mdx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/website/src/pages/hi/indexing/supported-network-requirements.mdx b/website/src/pages/hi/indexing/supported-network-requirements.mdx index 647eda3e6651..29673e7529a7 100644 --- a/website/src/pages/hi/indexing/supported-network-requirements.mdx +++ b/website/src/pages/hi/indexing/supported-network-requirements.mdx @@ -2,17 +2,17 @@ title: Supported Network Requirements --- -| नेटवर्क | Guides | System Requirements | Indexing Rewards | -| --- | --- | --- | :-: | -| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | -| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _अंतिम बार अपडेट किया गया 22 जून 2024_ | ✅ | -| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | -| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | -| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | -| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | +| नेटवर्क | Guides | System Requirements | Indexing Rewards | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | :--------------: | +| Arbitrum | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/arbitrum/docker) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Avalanche | [Docker Guide](https://docs.infradao.com/archive-nodes-101/avalanche/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 5 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Base | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/base/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/base/geth/docker) | 8+ core CPU
    Debian 12/Ubuntu 22.04
    16 GB RAM
    >= 4.5TB (NVMe preferred)
    _last updated 14th May 2024_ | ✅ | +| Binance | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/binance/erigon/baremetal) | 8 core / 16 threads CPU
    Ubuntu 22.04
    >=32 GB RAM
    >= 14 TiB NVMe SSD
    _अंतिम बार अपडेट किया गया 22 जून 2024_ | ✅ | +| Celo | [Docker Guide](https://docs.infradao.com/archive-nodes-101/celo/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 2 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Ethereum | [Docker Guide](https://docs.infradao.com/archive-nodes-101/ethereum/erigon/docker) | Higher clock speed over core count
    Ubuntu 22.04
    16GB+ RAM
    >=3TB (NVMe recommended)
    _last updated August 2023_ | ✅ | +| Fantom | [Docker Guide](https://docs.infradao.com/archive-nodes-101/fantom/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 13 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Gnosis | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/gnosis/erigon/baremetal) | 6 core / 12 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 3 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Linea | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/linea/baremetal) | 4+ core CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 2nd April 2024_ | ✅ | +| Optimism | [Erigon Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/erigon/baremetal)

    [GETH Baremetal Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/baremetal)
    [GETH Docker Guide](https://docs.infradao.com/archive-nodes-101/optimism/geth/docker) | 4 core / 8 threads CPU
    Ubuntu 22.04
    16GB+ RAM
    >= 8 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Polygon | [Docker Guide](https://docs.infradao.com/archive-nodes-101/polygon/docker) | 16 core CPU
    Ubuntu 22.04
    32GB+ RAM
    >= 10 TiB NVMe SSD
    _last updated August 2023_ | ✅ | +| Scroll | [Baremetal Guide](https://docs.infradao.com/archive-nodes-101/scroll/baremetal)
    [Docker Guide](https://docs.infradao.com/archive-nodes-101/scroll/docker) | 4 core / 8 threads CPU
    Debian 12
    16GB+ RAM
    >= 1 TiB NVMe SSD
    _last updated 3rd April 2024_ | ✅ | From 817271152d8478ab999fd59a8a8a9736da2a057d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:56 -0500 Subject: [PATCH 0395/1534] New translations firehose.mdx (French) --- website/src/pages/fr/indexing/tooling/firehose.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/fr/indexing/tooling/firehose.mdx b/website/src/pages/fr/indexing/tooling/firehose.mdx index 75060e3682fa..9efc6479075d 100644 --- a/website/src/pages/fr/indexing/tooling/firehose.mdx +++ b/website/src/pages/fr/indexing/tooling/firehose.mdx @@ -2,11 +2,11 @@ title: Firehose --- -![Logo Firehose](/img/firehose-logo.png) +![Firehose Logo](/img/firehose-logo.png) -Firehose est une nouvelle technologie développée par StreamingFast en collaboration avec The Graph Foundation. Le produit offre des **des capacités et des vitesses inédites pour l'indexation des données de la blockchain** en utilisant une approche basée sur les fichiers et axée sur le streaming. +Firehose est une nouvelle technologie développée par StreamingFast en collaboration avec The Graph Foundation. Le produit offre **des capacités et des vitesses inédites pour l'indexation des données de la blockchain** en utilisant une approche basée sur les fichiers et le streaming-first. -The Graph merges into Go Ethereum/geth with the adoption of [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). +The Graph fusionne avec Go Ethereum/geth avec l'adoption de [Live Tracer avec la version v1.14.0](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). Firehose extrait, transforme et enregistre les données de la blockchain dans une stratégie très performante basée sur des fichiers. Les développeurs de blockchain peuvent ensuite accéder aux données extraites par Firehose par le biais de flux de données binaires. Firehose est destiné à remplacer la couche d'extraction de données blockchain originale de The Graph. @@ -14,11 +14,11 @@ Firehose extrait, transforme et enregistre les données de la blockchain dans un La documentation Firehose est actuellement maintenue par l'équipe StreamingFast [sur le site web de StreamingFast](https://firehose.streamingfast.io/). -### Démarrage +### Introduction -- Lisez cette [introduction à Firehose](https://firehose.streamingfast.io/introduction/firehose-overview) pour avoir un aperçu de ce que c'est et pourquoi il a été construit. -- Découvrez les [Prérequis](https://firehose.streamingfast.io/introduction/prerequisites) pour installer et déployer Firehose. +- Lisez cette [Introduction à Firehose](https://firehose.streamingfast.io/introduction/firehose-overview) pour avoir une vue d'ensemble de ce qu'il est et pourquoi il a été construit. +- Découvrez les [conditions préalables](https://firehose.streamingfast.io/introduction/prerequisites) nécessaires à l'installation et au déploiement de Firehose. -### Approfondissez vos connaissances +### Élargissez vos connaissances - Découvrez les différents [composants Firehose](https://firehose.streamingfast.io/architecture/components) disponibles. 
From 90404a80ba1c93166855c31cc4b6c3e64ed5a4bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:58 -0500 Subject: [PATCH 0396/1534] New translations firehose.mdx (Czech) --- website/src/pages/cs/indexing/tooling/firehose.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/cs/indexing/tooling/firehose.mdx b/website/src/pages/cs/indexing/tooling/firehose.mdx index 69d30d9a3fde..56623899121b 100644 --- a/website/src/pages/cs/indexing/tooling/firehose.mdx +++ b/website/src/pages/cs/indexing/tooling/firehose.mdx @@ -4,7 +4,7 @@ title: Firehose ![Firehose Logo](/img/firehose-logo.png) -Firehose je nová technologie vyvinutá společností StreamingFast ve spolupráci s nadací Graf Foundation. Tento produkt poskytuje **dosud nevídané schopnosti a rychlosti pro indexaci dat blockchainu** pomocí přístupu založeného na souborech a prioritního streamování. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. The Graph merges into Go Ethereum/geth with the adoption of [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). @@ -12,13 +12,13 @@ Firehose extrahuje, transformuje a ukládá data blockchainu ve vysoce výkonné ## Dokumentace Firehose -Dokumentaci ke službě Firehose v současné době spravuje tým společnosti StreamingFast [na webových stránkách StreamingFast](https://firehose.streamingfast.io/). +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). ### Začínáme -- Přečtěte si tento [Úvod do systému Firehose](https://firehose.streamingfast.io/introduction/firehose-overview), abyste získali přehled o tom, co je to a proč byl vytvořen. -- Seznamte se s [Předpoklady](https://firehose.streamingfast.io/introduction/prerequisites) pro instalaci a nasazení služby Firehose. +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. ### Rozšiřte své znalosti -- Seznamte se s různými dostupnými [komponenty Firehose](https://firehose.streamingfast.io/architecture/components). +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. From dc917ca06b1efd80dbd3dd326069727f216bb0ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:58 -0500 Subject: [PATCH 0397/1534] New translations firehose.mdx (German) --- .../pages/de/indexing/tooling/firehose.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/de/indexing/tooling/firehose.mdx b/website/src/pages/de/indexing/tooling/firehose.mdx index 0f0fdebbafd0..9ec4cc3c1908 100644 --- a/website/src/pages/de/indexing/tooling/firehose.mdx +++ b/website/src/pages/de/indexing/tooling/firehose.mdx @@ -4,21 +4,21 @@ title: Firehose ![Firehose Logo](/img/firehose-logo.png) -Firehose is a new technology developed by StreamingFast working with The Graph Foundation. 
The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. +Firehose ist eine neue Technologie, die von StreamingFast in Zusammenarbeit mit The Graph Foundation entwickelt wurde. Das Produkt bietet **noch nie dagewesene Fähigkeiten und Geschwindigkeiten für die Indizierung von Blockchain-Daten** mit einem dateibasierten und Streaming-first-Ansatz. -The Graph merges into Go Ethereum/geth with the adoption of [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). +The Graph geht mit der Einführung von [Live Tracer mit Version v1.14.0] (https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0) in Go Ethereum/geth auf. -Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. +Firehose extrahiert, transformiert und speichert Blockchain-Daten in einer hochleistungsfähigen dateibasierten Strategie. Blockchain-Entwickler können dann auf die von Firehose extrahierten Daten über binäre Datenströme zugreifen. Firehose soll als Ersatz für die ursprüngliche Blockchain-Datenextraktionsschicht von The Graph dienen. -## Firehose Documentation +## Firehose-Dokumentation -The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). +Die Firehose-Dokumentation wird derzeit vom StreamingFast-Team [auf der StreamingFast-Website] (https://firehose.streamingfast.io/) gepflegt. -### Getting Started +### Erste Schritte -- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. -- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. +- Lesen Sie diese [Firehose-Einführung] (https://firehose.streamingfast.io/introduction/firehose-overview), um einen Überblick darüber zu erhalten, was es ist und warum es entwickelt wurde. +- Informieren Sie sich über die [Voraussetzungen](https://firehose.streamingfast.io/introduction/prerequisites) zur Installation und Bereitstellung von Firehose. -### Expand Your Knowledge +### Erweitern Sie Ihr Wissen -- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. +- Erfahren Sie mehr über die verschiedenen [Firehose-Komponenten](https://firehose.streamingfast.io/architecture/components), die verfügbar sind. 
From 43822c052a396f6b2a1c0900e847430249b6308d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:50:59 -0500 Subject: [PATCH 0398/1534] New translations firehose.mdx (Italian) --- website/src/pages/it/indexing/tooling/firehose.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/indexing/tooling/firehose.mdx b/website/src/pages/it/indexing/tooling/firehose.mdx index 0f0fdebbafd0..8ebcca2d0822 100644 --- a/website/src/pages/it/indexing/tooling/firehose.mdx +++ b/website/src/pages/it/indexing/tooling/firehose.mdx @@ -14,7 +14,7 @@ Firehose extracts, transforms and saves blockchain data in a highly performant f The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). -### Getting Started +### Per cominciare - Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. - Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. From 9f3d437ea8cbcb183b2cb8a91963c256ddecbd65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:00 -0500 Subject: [PATCH 0399/1534] New translations firehose.mdx (Japanese) --- website/src/pages/ja/indexing/tooling/firehose.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ja/indexing/tooling/firehose.mdx b/website/src/pages/ja/indexing/tooling/firehose.mdx index 00fa111951e1..dcb3f088e675 100644 --- a/website/src/pages/ja/indexing/tooling/firehose.mdx +++ b/website/src/pages/ja/indexing/tooling/firehose.mdx @@ -2,9 +2,9 @@ title: Firehose --- -![Firehose ロゴ](/img/firehose-logo.png) +![Firehose Logo](/img/firehose-logo.png) -Firehoseは、StreamingFastがThe Graph Foundationと共同で開発した新技術です。この製品は、**ファイルベースとストリーミングファーストのアプローチを使って、ブロックチェーンデータ**をインデックス化するための、これまでにない機能と速度を提供します。 +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. The Graph merges into Go Ethereum/geth with the adoption of [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). @@ -12,13 +12,13 @@ Firehoseはブロックチェーンのデータを抽出し、変換し、非常 ## Firehose ドキュメンテーション -Firehoseのドキュメントは現在、StreamingFastチーム[がStreamingFastのウェブサイト](https://firehose.streamingfast.io/)で管理しています。 +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). ### はじめに -- この[Firehoseの紹介](https://firehose.streamingfast.io/introduction/firehose-overview)を読んで、Firehoseとは何か、なぜ作られたのかの概要を学びましょう。 -- Firehose をインストールおよびデプロイするための [ 前提条件](https://firehose.streamingfast.io/introduction/prerequisites) について説明します。 +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. ### 知識を広げよう -- さまざまな[ファイヤーホース・コンポーネント](https://firehose.streamingfast.io/architecture/components)についてご紹介します。 +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. 
From 6f455c95306a88f16256109c2304862122eab54c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:03 -0500 Subject: [PATCH 0400/1534] New translations firehose.mdx (Portuguese) --- website/src/pages/pt/indexing/tooling/firehose.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/src/pages/pt/indexing/tooling/firehose.mdx b/website/src/pages/pt/indexing/tooling/firehose.mdx index a56edbb11211..3b2cd8bd07c5 100644 --- a/website/src/pages/pt/indexing/tooling/firehose.mdx +++ b/website/src/pages/pt/indexing/tooling/firehose.mdx @@ -4,21 +4,21 @@ title: Firehose ![Logo do Firehose](/img/firehose-logo.png) -O Firehose é uma nova tecnologia desenvolvida pelo StreamingFast em colaboração com a Graph Foundation. O produto providencia **capacidades e velocidades inéditas para a indexação de dados em blockchain** com uma abordagem baseada em arquivos e focada no streaming em primeiro lugar. +O Firehose é uma nova tecnologia desenvolvida pelo StreamingFast em colaboração com a Graph Foundation. O produto providencia **capacidades e velocidades sem precedentes de indexação de dados em blockchain** com um processo baseado em arquivos e focado no streaming em primeiro lugar. -O The Graph se fundiu com o Go Ethereum/geth com a adoção do [Live Tracer no lançamento v1.14.0](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). +O The Graph se junta ao Go Ethereum/geth com a adoção do [Live Tracer com a versão 1.14.0](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). O Firehose extrai, transforma e salva dados de blockchain em uma estratégia baseada em arquivos e de alto desempenho. Os programadores de blockchain podem então acessar dados extraídos pelo Firehose através de streams de dados binários. A intenção do Firehose é substituir a camada original de extração de dados de blockchain do The Graph. ## Documentação do Firehose -A documentação do Firehose é atualmente mantida pela equipa do StreamingFast no [site oficial do StreamingFast](https://firehose.streamingfast.io/). +A documentação do Firehose é atualmente mantida pela equipa do StreamingFast no seu [site oficial](https://firehose.streamingfast.io/). ### Como Começar -- Leia esta [introdução ao Firehose](https://firehose.streamingfast.io/introduction/firehose-overview) para ter uma ideia de como ele é e saber por que ele foi construído. -- Aprenda sobre os [Pré-requisitos](https://firehose.streamingfast.io/introduction/prerequisites) para instalar e editar o Firehose. +- Leia esta [introdução ao Firehose](https://firehose.streamingfast.io/introduction/firehose-overview) para saber o que ele é e por que foi construído. +- Veja os [Prerrequisitos](https://firehose.streamingfast.io/introduction/prerequisites) para a instalação e execução do Firehose. ### Expanda o Seu Conhecimento -- Aprenda sobre os [vários componentes](https://firehose.streamingfast.io/architecture/components) disponíveis do Firehose. +- Aprenda sobre os vários componentes disponíveis do Firehose [aqui](https://firehose.streamingfast.io/architecture/components). 
From 60dedd255dccb086709e619c96f197e3b88439c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:04 -0500 Subject: [PATCH 0401/1534] New translations firehose.mdx (Russian) --- .../src/pages/ru/indexing/tooling/firehose.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ru/indexing/tooling/firehose.mdx b/website/src/pages/ru/indexing/tooling/firehose.mdx index 1986d99aa807..b8859a10682f 100644 --- a/website/src/pages/ru/indexing/tooling/firehose.mdx +++ b/website/src/pages/ru/indexing/tooling/firehose.mdx @@ -4,21 +4,21 @@ title: Firehose ![Firehose Logo](/img/firehose-logo.png) -Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. +Firehose — это новая технология, разработанная StreamingFast в сотрудничестве с The Graph Foundation. Продукт предоставляет **ранее невиданные возможности и скорости для индексации данных блокчейна** с использованием подхода на основе файлов и потоковой передачи. -The Graph merges into Go Ethereum/geth with the adoption of [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). +The Graph объединяется с Go Ethereum/geth с появлением [Live Tracer с версией v1.14.0] (https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). -Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. +Firehose извлекает, преобразует и сохраняет данные блокчейна с помощью высокопроизводительной файловой стратегии. Разработчики блокчейна могут затем получить доступ к данным, извлеченным Firehose, через потоки двоичных данных. Firehose призван заменить исходный уровень извлечения данных блокчейна The Graph. -## Firehose Documentation +## Документация Firehose -The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). +Документация Firehose в настоящее время поддерживается командой StreamingFast [на веб-сайте StreamingFast[ (https://firehose.streamingfast.io/). ### Начало работы -- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. -- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. +- Прочтите [Введение в Firehose](https://firehose.streamingfast.io/introduction/firehose-overview), чтобы получить общее представление о том, что это такое и почему было создано. +- Узнайте о [предварительных условиях](https://firehose.streamingfast.io/introduction/prequires) для установки и развертывания Firehose. -### Expand Your Knowledge +### Расширьте свои знания -- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. +- Узнайте о различных доступных [компонентах Firehose](https://firehose.streamingfast.io/architecture/components). 
From d1128e412a867218abb02412bffe9569de5cb700 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:05 -0500 Subject: [PATCH 0402/1534] New translations firehose.mdx (Turkish) --- website/src/pages/tr/indexing/tooling/firehose.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/src/pages/tr/indexing/tooling/firehose.mdx b/website/src/pages/tr/indexing/tooling/firehose.mdx index 108ad3ae8199..686b37df1c43 100644 --- a/website/src/pages/tr/indexing/tooling/firehose.mdx +++ b/website/src/pages/tr/indexing/tooling/firehose.mdx @@ -2,9 +2,9 @@ title: Firehose --- -![Firehose Logosu](/img/firehose-logo.png) +![Firehose Logo](/img/firehose-logo.png) -Firehose, StreamingFast tarafından Graph Vakfı ile birlikte geliştirdiği yeni bir teknolojidir. Ürün, dosya tabanlı ve akış öncelikli bir yaklaşım kullanarak **blok zinciri verilerini indekslemek için daha önce görülmemiş olanaklar ve hızlar** sağlamaktadır. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. The Graph merges into Go Ethereum/geth with the adoption of [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). @@ -12,13 +12,13 @@ Firehose, blok zinciri verilerini yüksek performanslı dosya tabanlı bir strat ## Firehose Dökümantasyonu -Firehose dökümantasyonu şu anda StreamingFast ekibi tarafından [StreamingFast web sitesinde](https://firehose.streamingfast.io/) tutulmaktadır. +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). ### Buradan Başlayın -- Firehose'un ne olduğu ve neden oluşturulduğu hakkında genel bilgi edinmek adına bu [Firehose tanıtım yazısını](https://firehose.streamingfast.io/introduction/firehose-overview) okuyun. -- Firehose'u yüklemek ve dağıtmak için [Ön Koşullar](https://firehose.streamingfast.io/introduction/prerequisites) hakkında bilgi edinin. +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. ### Bilgi Dağarcığınızı Genişletin -- Mevcut farklı [Firehose bileşenleri](https://firehose.streamingfast.io/architecture/components) hakkında bilgi edinin. +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. 
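The firehose.mdx pages updated in the patches above all describe the same consumption model: Firehose extracts and saves chain data in a file-based strategy and serves it to developers as binary data streams over gRPC. As a rough illustration of what that looks like from the client side, the sketch below subscribes to the `Blocks` RPC of StreamingFast's published `sf.firehose.v2` service. The endpoint address and local proto path are placeholders, and real providers additionally require an API key passed as gRPC metadata (omitted here) — treat this as a sketch under those assumptions, not an official client; the StreamingFast documentation linked in the diffs is the authoritative reference.

```typescript
// Sketch: streaming blocks from a Firehose endpoint with @grpc/grpc-js.
// Assumes the sf.firehose.v2 .proto files are vendored under ./proto and
// that authentication (normally an API key in gRPC metadata) is handled
// elsewhere or not required.
import * as grpc from "@grpc/grpc-js";
import * as protoLoader from "@grpc/proto-loader";

const definition = protoLoader.loadSync("sf/firehose/v2/firehose.proto", {
  includeDirs: ["./proto"],
  keepCase: true, // keep snake_case field names as declared in the proto
});
const pkg = grpc.loadPackageDefinition(definition) as any;

// Placeholder endpoint -- substitute a real Firehose provider.
const client = new pkg.sf.firehose.v2.Stream(
  "firehose.example.com:443",
  grpc.credentials.createSsl()
);

// Blocks is a server-streaming RPC: one request in, a stream of responses out.
const call = client.Blocks({
  start_block_num: 17_000_000,
  final_blocks_only: true, // skip blocks that could still be forked out
});

call.on("data", (resp: any) => {
  // resp.block is a protobuf `Any` wrapping a chain-specific block message.
  console.log("block payload:", resp.block?.type_url, "cursor:", resp.cursor);
});
call.on("error", (err: Error) => console.error("stream error:", err.message));
call.on("end", () => console.log("stream ended"));
```

Because the payload is an `Any`, the same RPC shape serves every supported chain: a consumer decodes `resp.block` with the block type for its network and persists `resp.cursor` so the stream can be resumed later without gaps.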
From 19e02e088c96d7b6420c5549cd40a95a745d29d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:08 -0500 Subject: [PATCH 0403/1534] New translations firehose.mdx (Marathi) --- website/src/pages/mr/indexing/tooling/firehose.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/indexing/tooling/firehose.mdx b/website/src/pages/mr/indexing/tooling/firehose.mdx index 6b643f84a0e0..333a00c1b32c 100644 --- a/website/src/pages/mr/indexing/tooling/firehose.mdx +++ b/website/src/pages/mr/indexing/tooling/firehose.mdx @@ -1,5 +1,5 @@ --- -title: Firehose +title: फायरहोस --- ![Firehose Logo](/img/firehose-logo.png) From 50f3d797b33fb131c6de740806b7bf470d60fe97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:09 -0500 Subject: [PATCH 0404/1534] New translations firehose.mdx (Hindi) --- website/src/pages/hi/indexing/tooling/firehose.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/src/pages/hi/indexing/tooling/firehose.mdx b/website/src/pages/hi/indexing/tooling/firehose.mdx index 199ab7661bb4..d2a13417500b 100644 --- a/website/src/pages/hi/indexing/tooling/firehose.mdx +++ b/website/src/pages/hi/indexing/tooling/firehose.mdx @@ -4,21 +4,21 @@ title: Firehose ![Firehose Logo](/img/firehose-logo.png) -Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. +Firehose एक नई तकनीक है जिसे StreamingFast ने The Graph Foundation के साथ मिलकर विकसित किया है। यह **उत्पाद ब्लॉकचेन डेटा को indexing करने के लिए पहले कभी नहीं देखी गई क्षमताएँ और गति प्रदान** करता है, जो कि एक फ़ाइल-आधारित और स्ट्रीमिंग-प्रथम दृष्टिकोण का उपयोग करता है। -The Graph merges into Go Ethereum/geth with the adoption of [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0). +The Graph ने Go Ethereum/geth में विलय कर लिया है और [Live Tracer with v1.14.0 release](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0) को अपनाया है। Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. ## Firehose Documentation -The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). +Firehose का दस्तावेज़ वर्तमान में StreamingFast टीम द्वारा [StreamingFast वेबसाइट पर](https://firehose.streamingfast.io/) प्रबंधित किया जाता है। ### शुरू करना -- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. -- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. 
+- Firehose का परिचय पढ़ें [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) यह जानने के लिए कि यह क्या है और इसे क्यों बनाया गया। +- [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) के बारे में जानें ताकि Firehose को इंस्टॉल और डिप्लॉय किया जा सके। ### Expand Your Knowledge -- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. +- विभिन्न [Firehose components](https://firehose.streamingfast.io/architecture/components) के बारे में जानें। From 6846ca818302361bf870d88fdf08281c82af1845 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:10 -0500 Subject: [PATCH 0405/1534] New translations graphcast.mdx (Romanian) --- website/src/pages/ro/indexing/tooling/graphcast.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/indexing/tooling/graphcast.mdx b/website/src/pages/ro/indexing/tooling/graphcast.mdx index 4072877a1257..cac63bbd9340 100644 --- a/website/src/pages/ro/indexing/tooling/graphcast.mdx +++ b/website/src/pages/ro/indexing/tooling/graphcast.mdx @@ -16,6 +16,6 @@ The Graphcast SDK (Software Development Kit) allows developers to build Radios, - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. -### Learn More +### Află mai multe If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) From d229161cb8197e35700da2fd4ad2135f782a8b30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:14 -0500 Subject: [PATCH 0406/1534] New translations graphcast.mdx (German) --- .../pages/de/indexing/tooling/graphcast.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/de/indexing/tooling/graphcast.mdx b/website/src/pages/de/indexing/tooling/graphcast.mdx index 4072877a1257..ed3f8db0e9d8 100644 --- a/website/src/pages/de/indexing/tooling/graphcast.mdx +++ b/website/src/pages/de/indexing/tooling/graphcast.mdx @@ -2,20 +2,20 @@ title: Graphcast --- -## Introduction +## Einführung -Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? +Gibt es etwas, von dem Sie gerne lernen würden oder das Sie gerne mit anderen Indexern teilen würden, aber das ist Ihnen zu mühsam oder kostet zu viel Gas? -Currently, the cost to broadcast information to other network participants is determined by gas fees on the Ethereum blockchain. Graphcast solves this problem by acting as an optional decentralized, distributed peer-to-peer (P2P) communication tool that allows Indexers across the network to exchange information in real time. The cost of exchanging P2P messages is near zero, with the tradeoff of no data integrity guarantees. Nevertheless, Graphcast aims to provide message validity guarantees (i.e. that the message is valid and signed by a known protocol participant) with an open design space of reputation models. +Derzeit werden die Kosten für die Übertragung von Informationen an andere Netzwerkteilnehmer durch Gasgebühren auf der Ethereum-Blockchain bestimmt. 
Graphcast löst dieses Problem, indem es als optionales dezentrales, verteiltes Peer-to-Peer (P2P)-Kommunikationstool fungiert, das es Indexern im gesamten Netzwerk ermöglicht, Informationen in Echtzeit auszutauschen. Die Kosten für den Austausch von P2P-Nachrichten sind nahezu gleich Null, wobei die Integrität der Daten nicht garantiert ist. Nichtsdestotrotz zielt Graphcast darauf ab, Garantien für die Gültigkeit von Nachrichten (d.h. dass die Nachricht gültig und von einem bekannten Protokollteilnehmer signiert ist) mit einem offenen Designraum von Reputationsmodellen zu bieten. -The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: +Das Graphcast SDK (Software Development Kit) ermöglicht es Entwicklern, Radios zu erstellen, d.h. klatschgesteuerte Anwendungen, die Indexer ausführen können, um einen bestimmten Zweck zu erfüllen. Wir beabsichtigen auch, einige Radios für die folgenden Anwendungsfälle zu erstellen (oder anderen Entwicklern/Teams, die Radios erstellen möchten, Unterstützung zu bieten): -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. -- Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. +- Echtzeit-Überprüfung der Integrität von Subgraph-Daten ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Durchführung von Auktionen und Koordination für die Warp-Synchronisierung von Subgraphen, Substreams und Firehose-Daten von anderen Indexern. +- Selbstauskunft über die Analyse aktiver Abfragen, einschließlich Subgraph-Anfragevolumen, Gebührenvolumen usw. +- Selbstauskunft über die Indizierungsanalyse, einschließlich der Zeit für die Indizierung von Subgraphen, Gaskosten für die Bearbeitung, aufgetretene Indexierungsfehler usw. +- Selbstauskunft über Stack-Informationen, einschließlich Graph-Node-Version, Postgres-Version, Ethereum-Client-Version, usw. 
-### Learn More +### Weitere Informationen -If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) +Wenn Sie mehr über Graphcast erfahren möchten, schauen Sie sich die Dokumentation an (https://docs.graphops.xyz/graphcast/intro) From f9f18a96c8666d1b55fd7c7e31f44ae58dfab097 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:16 -0500 Subject: [PATCH 0407/1534] New translations graphcast.mdx (Dutch) --- website/src/pages/nl/indexing/tooling/graphcast.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/indexing/tooling/graphcast.mdx b/website/src/pages/nl/indexing/tooling/graphcast.mdx index 4072877a1257..cbc12c17f95b 100644 --- a/website/src/pages/nl/indexing/tooling/graphcast.mdx +++ b/website/src/pages/nl/indexing/tooling/graphcast.mdx @@ -16,6 +16,6 @@ The Graphcast SDK (Software Development Kit) allows developers to build Radios, - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. -### Learn More +### Leer Meer If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) From f3feb4bd7c15d6767b2b8e27dc98fa779b8445d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:18 -0500 Subject: [PATCH 0408/1534] New translations graphcast.mdx (Russian) --- website/src/pages/ru/indexing/tooling/graphcast.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ru/indexing/tooling/graphcast.mdx b/website/src/pages/ru/indexing/tooling/graphcast.mdx index f3b330f6c374..a3c391cf3e4f 100644 --- a/website/src/pages/ru/indexing/tooling/graphcast.mdx +++ b/website/src/pages/ru/indexing/tooling/graphcast.mdx @@ -2,7 +2,7 @@ title: Graphcast --- -## Введение +## Introduction Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). +- Перекрестная проверка целостности данных субграфа в режиме реального времени (Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio/intro)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. 
From 0b67457aac1bb8cfa49c387c48100f385cea618b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:19 -0500 Subject: [PATCH 0409/1534] New translations graphcast.mdx (Swedish) --- website/src/pages/sv/indexing/tooling/graphcast.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/indexing/tooling/graphcast.mdx b/website/src/pages/sv/indexing/tooling/graphcast.mdx index 843b713c1bd0..213029e1836b 100644 --- a/website/src/pages/sv/indexing/tooling/graphcast.mdx +++ b/website/src/pages/sv/indexing/tooling/graphcast.mdx @@ -16,6 +16,6 @@ Graphcast SDK (Utrustning för programvaruutveckling) gör det möjligt för utv - Självrapportering om indexeringanalys, inklusive tid för delgrafindexering, gasavgifter för handler, påträffade indexeringsfel etc. - Självrapportering om stackinformation inklusive graph-node-version, Postgres-version, Ethereum-klientversion etc. -### Lär dig mer +### Läs mer Om du vill lära dig mer om Graphcast, [kolla in dokumentationen här.](https://docs.graphops.xyz/graphcast/intro) From 24d62692f445264f6fb6080ff993f44453d453b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:20 -0500 Subject: [PATCH 0410/1534] New translations graphcast.mdx (Turkish) --- website/src/pages/tr/indexing/tooling/graphcast.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/indexing/tooling/graphcast.mdx b/website/src/pages/tr/indexing/tooling/graphcast.mdx index 1ddae707e52e..d0bce650e2ae 100644 --- a/website/src/pages/tr/indexing/tooling/graphcast.mdx +++ b/website/src/pages/tr/indexing/tooling/graphcast.mdx @@ -16,6 +16,6 @@ The Graphcast SDK (Software Development Kit) allows developers to build Radios, - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. -### Learn More +### Daha Fazla Bilgi Edin If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) From 3d5fb7813ddf316cbb2c1f9853da6761aec0c62b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:21 -0500 Subject: [PATCH 0411/1534] New translations graphcast.mdx (Chinese Simplified) --- website/src/pages/zh/indexing/tooling/graphcast.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/zh/indexing/tooling/graphcast.mdx b/website/src/pages/zh/indexing/tooling/graphcast.mdx index d8eeb0aa957a..6e29da450727 100644 --- a/website/src/pages/zh/indexing/tooling/graphcast.mdx +++ b/website/src/pages/zh/indexing/tooling/graphcast.mdx @@ -4,7 +4,7 @@ title: Graphcast ## 介绍 -有什么你想以自动化的方式向其他索引人学习或分享的东西吗?但这太麻烦或太费gas了? +有什么你想以自动化的方式向其他索引人学习或分享的东西吗?但这太麻烦或太费燃气费了? 
目前,向其他网络参与者广播信息的成本由以太坊区块链上的gas费用决定。Graphcast通过充当一个可选的去中心化、分布式对等(P2P)通信工具来解决这个问题,该工具允许网络上的索引人实时交换信息。在没有数据完整性保证的情况下,交换P2P消息的成本几乎为零。尽管如此,Graphcast旨在通过信誉模型的开放设计空间提供消息有效性保证(即消息是有效的,并由已知的协议参与者签名)。 From 3f524690c02da01808da117a2a9752bff4eab96c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:24 -0500 Subject: [PATCH 0412/1534] New translations graphcast.mdx (Marathi) --- website/src/pages/mr/indexing/tooling/graphcast.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/mr/indexing/tooling/graphcast.mdx b/website/src/pages/mr/indexing/tooling/graphcast.mdx index 281104f50781..46e7c77e864d 100644 --- a/website/src/pages/mr/indexing/tooling/graphcast.mdx +++ b/website/src/pages/mr/indexing/tooling/graphcast.mdx @@ -1,5 +1,5 @@ --- -title: Graphcast +title: ग्राफकास्ट --- ## Introduction @@ -16,6 +16,6 @@ Is there something you'd like to learn from or share with your fellow Indexers i - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. - Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. -### Learn More +### अधिक जाणून घ्या तुम्हाला ग्राफकास्टबद्दल अधिक जाणून घ्यायचे असल्यास, [येथे दस्तऐवजीकरण पहा.](https://docs.graphops.xyz/graphcast/intro) From f66fdcc8ee52e550ec449b0a2de5d94cacf24512 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:25 -0500 Subject: [PATCH 0413/1534] New translations graphcast.mdx (Hindi) --- website/src/pages/hi/indexing/tooling/graphcast.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/hi/indexing/tooling/graphcast.mdx b/website/src/pages/hi/indexing/tooling/graphcast.mdx index a52e116cf941..216fc0a502c5 100644 --- a/website/src/pages/hi/indexing/tooling/graphcast.mdx +++ b/website/src/pages/hi/indexing/tooling/graphcast.mdx @@ -1,8 +1,8 @@ --- -title: ग्राफकास्ट +title: Graphcast --- -## परिचय +## Introduction क्या कोई ऐसी चीज है जिसे आप स्वचालित तरीके से अपने साथी इंडेक्सर्स से सीखना या साझा करना चाहते हैं, लेकिन यह बहुत अधिक परेशानी है या बहुत अधिक लागत है? From f4b89c1d5bba0394c4c78384bc59e9be0e9c8254 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:26 -0500 Subject: [PATCH 0414/1534] New translations benefits.mdx (Romanian) --- website/src/pages/ro/resources/benefits.mdx | 72 ++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ro/resources/benefits.mdx b/website/src/pages/ro/resources/benefits.mdx index d0a0e2675280..eea83da90bbc 100644 --- a/website/src/pages/ro/resources/benefits.mdx +++ b/website/src/pages/ro/resources/benefits.mdx @@ -1,5 +1,5 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph vs. 
Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | Rețeaua The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | Rețeaua The Graph | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | Rețeaua The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | Rețeaua The Graph | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | Rețeaua The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | 
-| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | Rețeaua The Graph | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month From 444a110c5391d1d91608588affc628b8dbb6cde0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:27 -0500 Subject: [PATCH 0415/1534] New translations benefits.mdx (French) --- website/src/pages/fr/resources/benefits.mdx | 90 ++++++++++----------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/website/src/pages/fr/resources/benefits.mdx b/website/src/pages/fr/resources/benefits.mdx index 280df951b5ff..e44fe8788e98 100644 --- a/website/src/pages/fr/resources/benefits.mdx +++ b/website/src/pages/fr/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: Le réseau Graph vs l'auto-hébergement +title: The Graph vs. l’auto-hébergement socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- Le réseau décentralisé de The Graph a été conçu et affiné pour créer une expérience d'indexation et d'interrogation robuste, et il s'améliore chaque jour grâce à des milliers de contributeurs à travers le monde. -Les avantages de ce protocole décentralisé ne peuvent pas être répliqués en exécutant un `graph-node` localement. Le réseau Graph est plus fiable, plus efficace et moins cher. +Les avantages de ce protocole décentralisé ne peuvent pas être reproduits en exécutant un `graph-node` localement. The Graph Network est plus fiable, plus efficace et moins coûteux. Voici une analyse : @@ -19,7 +19,7 @@ Voici une analyse : ## Les avantages expliqués -### Une structure & de coûts faible et plus flexible +### Structure de coûts plus faible et plus flexible Pas de contrat. Pas de frais mensuels. Vous ne payez que pour les requêtes que vous utilisez, avec un coût moyen par requête de 40 $ par million de requêtes (~0,00004 $ par requête). Les requêtes sont facturées en USD et payées en GRT ou par carte de crédit. 
@@ -27,67 +27,67 @@ Les coûts d'interrogation peuvent varier ; le coût indiqué est la moyenne au ## Utilisateur à faible volume (moins de 100 000 requêtes par mois) -| Cost Comparison | Auto-hébergé | The Graph Network | -| :-: | :-: | :-: | -| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | -| Frais de requête | + 0 $ | 0$ par mois | -| Temps d'ingénierie | 400 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | -| Requêtes au mois | Limité aux capacités infra | 100 000 (Plan Gratuit) | -| Tarif par requête | 0 $ | 0$ | -| Les infrastructures | Centralisée | Décentralisée | -| La redondance géographique | 750$+ par nœud complémentaire | Compris | -| Temps de disponibilité | Variable | + 99.9% | -| Total des coûts mensuels | + 750 $ | 0 $ | +| Cost Comparison | Auto-hébergé | The Graph Network | +| :----------------------------: | :--------------------------------------: | :-------------------------------------------------------------------------: | +| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | +| Frais de requête | - 0 $ | 0$ par mois | +| Temps d'ingénierie | 400 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | 100 000 (Plan Gratuit) | +| Tarif par requête | 0 $ | 0$ | +| Infrastructure | Centralisée | Décentralisée | +| La redondance géographique | 750$+ par nœud complémentaire | Compris | +| Temps de disponibilité | Variable | - 99.9% | +| Total des coûts mensuels | 750 $+ | 0 $ | ## Utilisateur à volume moyen (~3M requêtes par mois) -| Comparaison de coût | Auto-hébergé | The Graph Network | -| :-: | :-: | :-: | -| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | -| Frais de requête | 500 $ au mois | 120$ par mois | -| Temps d'ingénierie | 800 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | -| Requêtes au mois | Limité aux capacités infra | ~3,000,000 | -| Tarif par requête | 0 $ | $0.00004 | -| L'infrastructure | Centralisée | Décentralisée | -| Frais d'ingénierie | 200 $ au mois | Compris | -| La redondance géographique | 1 200 $ coût total par nœud supplémentaire | Compris | -| Temps de disponibilité | Variable | + 99.9% | -| Total des coûts mensuels | + 1650 $ | 120$ | +| Cost Comparison | Auto-hébergé | The Graph Network | +| :----------------------------: | :-----------------------------------------: | :-------------------------------------------------------------------------: | +| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | +| Frais de requête | 500 $ au mois | 120$ par mois | +| Temps d'ingénierie | 800 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | ~3,000,000 | +| Tarif par requête | 0 $ | $0.00004 | +| Infrastructure | Centralisée | Décentralisée | +| Frais d'ingénierie | 200 $ au mois | Compris | +| La redondance géographique | 1 200 $ coût total par nœud supplémentaire | Compris | +| Temps de disponibilité | Variable | - 99.9% | +| Total des coûts mensuels | 1 650 $+ | 120$ | ## Utilisateur à volume élevé (~30M requêtes par mois) -| Comparaison des coûts | Auto-hébergé | The Graph Network | -| :-: | :-: | :-: | -| Coût mensuel du serveur\* | 1100 $ au mois, par nœud | 0 $ | -| Frais de requête | 4000 $ | 1 200 $ par mois | -| Nombre de nœuds obligatoires | 10 | Sans objet | -| Temps d'ingénierie | 6000 $ ou plus au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale 
| -| Requêtes au mois | Limité aux capacités infra | ~30,000,000 | -| Tarif par requête | 0 $ | 0.00004$ | -| L'infrastructure | Centralisée | Décentralisée | -| La redondance géographique | 1 200 $ de coûts totaux par nœud supplémentaire | Compris | -| Temps de disponibilité | Variable | + 99.9% | -| Total des coûts mensuels | + 11 000 $ | 1,200$ | +| Cost Comparison | Auto-hébergé | The Graph Network | +| :----------------------------: | :------------------------------------------: | :-------------------------------------------------------------------------: | +| Coût mensuel du serveur\* | 1100 $ au mois, par nœud | 0 $ | +| Frais de requête | 4000 $ | 1 200 $ par mois | +| Nombre de nœuds obligatoires | 10 | Sans objet | +| Temps d'ingénierie | 6000 $ ou plus au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | ~30,000,000 | +| Tarif par requête | 0 $ | $0.00004 | +| Infrastructure | Centralisée | Décentralisée | +| La redondance géographique | 1 200 $ coût total par nœud supplémentaire | Compris | +| Temps de disponibilité | Variable | - 99.9% | +| Total des coûts mensuels | 11 000 $+ | 1,200$ | \*y compris les coûts de sauvegarde : $50-$ à 100 dollars au mois Temps d'ingénierie basé sur une hypothèse de 200 $ de l'heure -Reflète le coût pour le consommateur de données. Les frais de requête sont toujours payés aux Indexeurs pour -les requêtes du Plan Gratuit. +Reflète le coût pour le consommateur de données. Les frais de requête sont toujours payés aux Indexeurs pour les requêtes du Plan Gratuit. -Les coûts estimés sont uniquement pour les subgraphs Ethereum Mainnet - les coûts sont encore plus élevés lorsqu'on héberge soi-même un `graph-node` sur d'autres réseaux. Certains utilisateurs peuvent avoir besoin de mettre à jour leur subgraph vers une nouvelle version. En raison des frais de gaz Ethereum, une mise à jour coûte ~50 $ au moment de la rédaction. Notez que les frais de gaz sur [Arbitrum](/archived/arbitrum/arbitrum-faq/) sont considérablement plus bas que ceux d'Ethereum mainnet. +Les coûts estimés concernent uniquement les subgraphs sur le Mainnet d'Ethereum — les coûts sont encore plus élevés lorsqu’un `graph-node` est auto-hébergé sur d’autres réseaux. Certains utilisateurs peuvent avoir besoin de mettre à jour leur subgraph vers une nouvelle version. En raison des frais de gas sur Ethereum, une mise à jour coûte environ 50 $ au moment de la rédaction. Notez que les frais de gas sur [Arbitrum](/archived/arbitrum/arbitrum-faq/) sont nettement inférieurs à ceux du + Mainnet d'Ethereum. Émettre un signal sur un subgraph est un cout net, nul optionnel et unique (par exemple, 1 000 $ de signal peuvent être conservés sur un subgraph, puis retirés - avec la possibilité de gagner des revenus au cours du processus). -## Pas de frais d'installation & plus grande efficacité opérationnelle +## Pas de Coûts d’Installation & Plus grande Efficacité Opérationnelle Commencez tout de suite, sans installation, sans frais généraux et sans matériel. De plus, vous n'avez pas à vous soucier des temps d'arrêt dus à une infrastructure centralisée, ce qui vous laisse plus de temps pour vous concentrer sur votre produit principal. Vous n'avez pas non plus besoin de serveurs de secours, de dépannage ou d'autres ressources techniques coûteuses. 
## Fiabilité & Résilience -Le réseau décentralisé du Graph permet aux utilisateurs d'accéder à une redondance géographique qui n'existe pas lorsqu'ils hébergent eux-mêmes un `nœud de Graph`. Les requêtes sont servies de manière fiable grâce à un temps de disponibilité de plus de 99,9 %, obtenu par des centaines d'indexeurs indépendants qui sécurisent le réseau à l'échelle mondiale. +Le réseau décentralisé de The Graph offre aux utilisateurs une redondance géographique qui n'existe pas en auto-hébergeant un `graph-node`. Les requêtes sont servies de manière fiable grâce à une disponibilité de plus de 99,9 %, assurée par des centaines d’Indexers indépendants qui sécurisent le réseau à l’échelle mondiale. -En résumé : Le réseau de graphs est moins coûteux, plus facile à utiliser et produit des résultats supérieurs à ceux obtenus par l'exécution locale d'un `nœud de graphs`. +En résumé : The Graph Network est moins cher, plus facile à utiliser et produit des résultats supérieurs à ceux obtenus par l'exécution locale d'un `graph-node`. -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +Commencez à utiliser The Graph Network dès aujourd’hui et découvrez comment [publier votre subgraph sur le réseau décentralisé de The Graph](/subgraphs/quick-start/). From 8de0bfd125fbd6e93f601c3e6aa6baa367aeb16d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:28 -0500 Subject: [PATCH 0416/1534] New translations benefits.mdx (Spanish) --- website/src/pages/es/resources/benefits.mdx | 82 ++++++++++----------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/website/src/pages/es/resources/benefits.mdx b/website/src/pages/es/resources/benefits.mdx index 6109ec463439..ff9487ee3401 100644 --- a/website/src/pages/es/resources/benefits.mdx +++ b/website/src/pages/es/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- La red descentralizada de The Graph ha sido diseñada y refinada para crear una experiencia sólida de indexación y consulta, y está mejorando cada día gracias a miles de contribuyentes en todo el mundo. -Los beneficios de este protocolo descentralizado no se pueden replicar ejecutando un `graph-node` localmente. La Red de The Graph es más confiable, eficiente y menos costosa. +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. Aqui hay un analisis: @@ -19,7 +19,7 @@ Aqui hay un analisis: ## Los beneficios explicados -### Estructura de costos más baja y más flexible +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. 
@@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Comparación de costos | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensual del servidor\* | $350 por mes | $0 | -| Costos de consulta | $0+ | $0 per month | -| Tiempo de ingeniería | $400 por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | -| Consultas por mes | Limitado a capacidades de infraestructura | 100,000 (Free Plan) | -| Costo por consulta | $0 | $0 | -| Infraestructura | Centralizado | Descentralizado | -| Redundancia geográfica | $750+ por nodo adicional | Incluido | -| Tiempo de actividad | Varía | 99.9%+ | -| Costos mensuales totales | $750+ | $0 | +| Comparación de costos | Self Hosted | The Graph Network | +| :------------------------------: | :---------------------------------------: | :-------------------------------------------------------------------: | +| Costo mensual del servidor\* | $350 por mes | $0 | +| Costos de consulta | $0+ | $0 per month | +| Tiempo de ingeniería | $400 por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | +| Consultas por mes | Limitado a capacidades de infraestructura | 100,000 (Free Plan) | +| Costo por consulta | $0 | $0 | +| Infrastructure | Centralizado | Descentralizado | +| Redundancia geográfica | $750+ por nodo adicional | Incluido | +| Tiempo de actividad | Varía | 99.9%+ | +| Costos mensuales totales | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Comparación de costos | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensual del servidor\* | $350 por mes | $0 | -| Costos de consulta | $500 por mes | $120 per month | -| Tiempo de ingeniería | $800 por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | -| Consultas por mes | Limitado a capacidades de infraestructura | ~3,000,000 | -| Costo por consulta | $0 | $0.00004 | -| Infraestructura | Centralizado | Descentralizado | -| Gastos de ingeniería | $200 por hora | Incluido | -| Redundancia geográfica | $1,200 en costos totales por nodo adicional | Incluido | -| Tiempo de actividad | Varía | 99.9%+ | -| Costos mensuales totales | $1,650+ | $120 | +| Comparación de costos | Self Hosted | The Graph Network | +| :------------------------------: | :-----------------------------------------: | :-------------------------------------------------------------------: | +| Costo mensual del servidor\* | $350 por mes | $0 | +| Costos de consulta | $500 por mes | $120 per month | +| Tiempo de ingeniería | $800 por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | +| Consultas por mes | Limitado a capacidades de infraestructura | ~3,000,000 | +| Costo por consulta | $0 | $0.00004 | +| Infrastructure | Centralizado | Descentralizado | +| Gastos de ingeniería | $200 por hora | Incluido | +| Redundancia geográfica | $1,200 en costos totales por nodo adicional | Incluido | +| Tiempo de actividad | Varía | 99.9%+ | +| Costos mensuales totales | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Comparación de costos | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensual del servidor\* | $1100 por mes, por nodo | $0 | -| Costos de consulta | $4000 | $1,200 per month | -| Número de nodos necesarios | 10 | No aplica | -| Tiempo de ingeniería | $6,000 o más por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | -| Consultas 
por mes | Limitado a capacidades de infraestructura | ~30,000,000 | -| Costo por consulta | $0 | $0.00004 | -| Infraestructura | Centralizado | Descentralizado | -| Redundancia geográfica | $1,200 en costos totales por nodo adicional | Incluido | -| Tiempo de actividad | Varía | 99.9%+ | -| Costos mensuales totales | $11,000+ | $1,200 | +| Comparación de costos | Self Hosted | The Graph Network | +| :------------------------------: | :-----------------------------------------: | :-------------------------------------------------------------------: | +| Costo mensual del servidor\* | $1100 por mes, por nodo | $0 | +| Costos de consulta | $4000 | $1,200 per month | +| Número de nodos necesarios | 10 | No aplica | +| Tiempo de ingeniería | $6,000 o más por mes | Ninguno, integrado en la red con Indexadores distribuidos globalmente | +| Consultas por mes | Limitado a capacidades de infraestructura | ~30,000,000 | +| Costo por consulta | $0 | $0.00004 | +| Infrastructure | Centralizado | Descentralizado | +| Redundancia geográfica | $1,200 en costos totales por nodo adicional | Incluido | +| Tiempo de actividad | Varía | 99.9%+ | +| Costos mensuales totales | $11,000+ | $1,200 | \*incluidos los costos de copia de seguridad: $50-$100 por mes @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe La señal de curación en un subgrafo es una acción opcional de única vez y no tiene costo neto (por ejemplo, se pueden curar $1k en señales en un subgrafo y luego retirarlas, con el potencial de obtener retornos en el proceso). -## Sin costos de configuración & mayor eficiencia operativa +## No Setup Costs & Greater Operational Efficiency Tarifas de instalación cero. Comienza de inmediato sin costos generales ni de instalación. Sin requisitos de hardware. Sin interrupciones debido a la infraestructura centralizada y más tiempo para concentrarte en tu producto principal. No se necesitan servidores de respaldo, resolución de problemas ni costosos recursos de ingeniería. -## Confiabilidad & Resistencia +## Reliability & Resiliency The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -En conclusión: The Graph Network es menos costoso, más fácil de usar y produce resultados superiores en comparación con ejecutar un `graph-node` localmente. +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From c797b785c5251ef916e6a759d8eb78ff92146978 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:29 -0500 Subject: [PATCH 0417/1534] New translations benefits.mdx (Arabic) --- website/src/pages/ar/resources/benefits.mdx | 72 ++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ar/resources/benefits.mdx b/website/src/pages/ar/resources/benefits.mdx index d1284dce22f1..067e4ef592a8 100644 --- a/website/src/pages/ar/resources/benefits.mdx +++ b/website/src/pages/ar/resources/benefits.mdx @@ -1,5 +1,5 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph vs. 
Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| البنية الأساسية | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| البنية الأساسية | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| البنية الأساسية | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included 
| -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | The Graph Network | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month From 7e397c53607163059e4d79519a907a25cd5b438f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:30 -0500 Subject: [PATCH 0418/1534] New translations benefits.mdx (Czech) --- website/src/pages/cs/resources/benefits.mdx | 84 ++++++++++----------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/cs/resources/benefits.mdx b/website/src/pages/cs/resources/benefits.mdx index 1f8942457a79..e075e6832a33 100644 --- a/website/src/pages/cs/resources/benefits.mdx +++ b/website/src/pages/cs/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: Síť grafů vs. vlastní hosting +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- Decentralizovaná síť Grafu byla navržena a zdokonalena tak, aby umožňovala robustní indexování a dotazování - a díky tisícům přispěvatelů z celého světa se každým dnem zlepšuje. -Výhody tohoto decentralizovaného protokolu nelze replikovat lokálním spuštěním `graf-node`. Síť Graf je spolehlivější, efektivnější a levnější. +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. Zde je analýza: @@ -19,7 +19,7 @@ Zde je analýza: ## Vysvětlení výhod -### Nižší & flexibilnější struktura nákladů +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. 
@@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Srovnání nákladů | Vlastní hostitel | The Graph Network | -| :-: | :-: | :-: | -| Měsíční náklady na server\* | $350 měsíčně | $0 | -| Náklady na dotazování | $0+ | $0 per month | -| Inženýrský čas | $400 měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | -| Dotazy za měsíc | Omezeno na infra schopnosti | 100,000 (Free Plan) | -| Náklady na jeden dotaz | $0 | $0 | -| Infrastruktura | Centralizovaný | Decentralizované | -| Geografická redundancy | $750+ Usd za další uzel | Zahrnuto | -| Provozuschopnost | Různé | 99.9%+ | -| Celkové měsíční náklady | $750+ | $0 | +| Srovnání nákladů | Vlastní hostitel | The Graph Network | +| :-------------------------: | :-------------------------------------: | :-----------------------------------------------------------: | +| Měsíční náklady na server\* | $350 měsíčně | $0 | +| Náklady na dotazování | $0+ | $0 per month | +| Inženýrský čas | $400 měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | +| Dotazy za měsíc | Omezeno na infra schopnosti | 100,000 (Free Plan) | +| Náklady na jeden dotaz | $0 | $0 | +| Infrastructure | Centralizovaný | Decentralizované | +| Geografická redundancy | $750+ Usd za další uzel | Zahrnuto | +| Provozuschopnost | Různé | 99.9%+ | +| Celkové měsíční náklady | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Srovnání nákladů | Vlastní hostitel | The Graph Network | -| :-: | :-: | :-: | -| Měsíční náklady na server\* | $350 měsíčně | $0 | -| Náklady na dotazování | $500 měsíčně | $120 per month | -| Inženýrský čas | $800 měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | -| Dotazy za měsíc | Omezeno na infra schopnosti | ~3,000,000 | -| Náklady na jeden dotaz | $0 | $0.00004 | -| Infrastruktura | Centralizovaný | Decentralizované | -| Výdaje inženýrskou | $200 za hodinu | Zahrnuto | -| Geografická redundancy | $1,200 celkových nákladů na další uzel | Zahrnuto | -| Provozuschopnost | Různé | 99.9%+ | -| Celkové měsíční náklady | $1,650+ | $120 | +| Srovnání nákladů | Vlastní hostitel | The Graph Network | +| :-------------------------: | :----------------------------------------: | :-----------------------------------------------------------: | +| Měsíční náklady na server\* | $350 měsíčně | $0 | +| Náklady na dotazování | $500 měsíčně | $120 per month | +| Inženýrský čas | $800 měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | +| Dotazy za měsíc | Omezeno na infra schopnosti | ~3,000,000 | +| Náklady na jeden dotaz | $0 | $0.00004 | +| Infrastructure | Centralizovaný | Decentralizované | +| Výdaje inženýrskou | $200 za hodinu | Zahrnuto | +| Geografická redundancy | $1,200 celkových nákladů na další uzel | Zahrnuto | +| Provozuschopnost | Různé | 99.9%+ | +| Celkové měsíční náklady | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Srovnání nákladů | Vlastní hostitel | The Graph Network | -| :-: | :-: | :-: | -| Měsíční náklady na server\* | $1100 měsíčně za uzel | $0 | -| Náklady na dotazování | $4000 | $1,200 per month | -| Počet potřebných uzlů | 10 | Nepoužije se | -| Inženýrský čas | 6$, 000 nebo více měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | -| Dotazy za měsíc | Omezeno na infra schopnosti | ~30,000,000 | -| Náklady na jeden dotaz | $0 | $0.00004 | -| Infrastruktura | Centralizovaný | Decentralizované | -| 
Geografická redundancy | $1,200 celkových nákladů na další uzel | Zahrnuto | -| Provozuschopnost | Různé | 99.9%+ | -| Celkové měsíční náklady | $11,000+ | $1,200 | +| Srovnání nákladů | Vlastní hostitel | The Graph Network | +| :-------------------------: | :-----------------------------------------: | :-----------------------------------------------------------: | +| Měsíční náklady na server\* | $1100 měsíčně za uzel | $0 | +| Náklady na dotazování | $4000 | $1,200 per month | +| Počet potřebných uzlů | 10 | Nepoužije se | +| Inženýrský čas | 6$, 000 nebo více měsíčně | Žádné, zabudované do sítě s globálně distribuovanými indexery | +| Dotazy za měsíc | Omezeno na infra schopnosti | ~30,000,000 | +| Náklady na jeden dotaz | $0 | $0.00004 | +| Infrastructure | Centralizovaný | Decentralizované | +| Geografická redundancy | $1,200 celkových nákladů na další uzel | Zahrnuto | +| Provozuschopnost | Různé | 99.9%+ | +| Celkové měsíční náklady | $11,000+ | $1,200 | \*včetně nákladů na zálohování: $50-$100 měsíčně @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe Kurátorování signálu na podgrafu je volitelný jednorázový čistý nulový náklad (např. na podgrafu lze kurátorovat signál v hodnotě $1k a později jej stáhnout - s potenciálem získat v tomto procesu výnosy). -## Žádné náklady na nastavení & vyšší efektivita provozu +## No Setup Costs & Greater Operational Efficiency Nulové instalační poplatky. Začněte ihned pracovat bez jakýchkoli nákladů na zřízení nebo režijních nákladů. Žádné hardwarové požadavky. Žádné výpadky kvůli centralizované infrastruktuře a více času na soustředění se na váš hlavní produkt . Žádná potřeba záložních serverů, řešení problémů nebo drahých technických zdrojů. -## Spolehlivost & odolnost +## Reliability & Resiliency -Decentralizovaná síť Grafu poskytuje uživatelům přístup ke geografické redundanci, která při vlastním hostování `graph-node` neexistuje. Dotazy jsou spolehlivě obsluhovány díky 99,. 9%+ uptime, kterého je dosaženo stovkami nezávislých indexerů zabezpečujících síť po celém světě. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -Podtrženo a sečteno: Síť Graf je levnější, jednodušší na používání a poskytuje lepší výsledky než lokální provozování `graph-node`. +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 8cd3085c3e7ef38e20a9028ed1b36e99db05db07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:31 -0500 Subject: [PATCH 0419/1534] New translations benefits.mdx (German) --- website/src/pages/de/resources/benefits.mdx | 126 ++++++++++---------- 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/website/src/pages/de/resources/benefits.mdx b/website/src/pages/de/resources/benefits.mdx index 8497b5111a6d..cf5c89128111 100644 --- a/website/src/pages/de/resources/benefits.mdx +++ b/website/src/pages/de/resources/benefits.mdx @@ -1,92 +1,92 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph vs. 
Self-Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- -The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. +Das dezentralisierte Netzwerk von The Graph wurde entwickelt und verfeinert, um eine robuste Indizierung und Abfrage zu ermöglichen - und es wird dank Tausender Mitwirkender auf der ganzen Welt jeden Tag besser. -The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. +Die Vorteile dieses dezentralen Protokolls können nicht durch den lokalen Betrieb eines `graph-node` repliziert werden. The Graph Network ist zuverlässiger, effizienter und kostengünstiger. -Here is an analysis: +Hier ist eine Analyse: -## Why You Should Use The Graph Network +## Warum Sie das The Graph Network nutzen sollten -- Significantly lower monthly costs -- $0 infrastructure setup costs -- Superior uptime -- Access to hundreds of independent Indexers around the world -- 24/7 technical support by global community +- Deutlich niedrigere monatliche Kosten +- $0 Kosten für die Einrichtung der Infrastruktur +- Hervorragende Betriebszeiten +- Zugang zu Hunderten von unabhängigen Indexern auf der ganzen Welt +- 24/7 technische Unterstützung durch die globale Community -## The Benefits Explained +## Die Vorteile werden erklärt -### Lower & more Flexible Cost Structure +### Niedrigere und flexiblere Kostenstruktur -No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. +Keine Verträge. Keine monatlichen Gebühren. Sie zahlen nur für die Abfragen, die Sie nutzen - mit durchschnittlichen Kosten pro Abfrage von $40 pro Million Abfragen (~$0,00004 pro Abfrage). Abfragen werden in USD abgerechnet und in GRT oder per Kreditkarte bezahlt. -Query costs may vary; the quoted cost is the average at time of publication (March 2024). +Die Abfragekosten können variieren; die angegebenen Kosten sind der Durchschnitt zum Zeitpunkt der Veröffentlichung (März 2024). 
-## Low Volume User (less than 100,000 queries per month) +## Benutzer mit geringem Volumen (weniger als 100.000 Abfragen pro Monat) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastruktur | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Kostenvergleich | Selbst gehostet | The Graph Network | +| :--------------------------: | :---------------------------------------: | :-------------------------------------------------------------: | +| Monatliche Serverkosten\* | $350 pro Monat | $0 | +| Abfragekosten | $0+ | $0 pro Monat | +| Entwicklungszeit | $400 pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | +| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | 100.000 (kostenloser Plan) | +| Kosten pro Abfrage | $0 | $0 | +| Infrastructure | Zentralisiert | Dezentralisiert | +| Geografische Redundanz | $750+ pro zusätzlichem Knoten | Eingeschlossen | +| Betriebszeit | Variiert | 99.9%+ | +| Monatliche Gesamtkosten | $750+ | $0 | -## Medium Volume User (~3M queries per month) +## Benutzer mit mittlerem Volumen (~3M Abfragen pro Monat) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastruktur | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Kostenvergleich | Selbst gehostet | The Graph Network | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monatliche Serverkosten\* | $350 pro Monat | $0 | +| Abfragekosten | $500 pro Monat | $120 pro Monat | +| Entwicklungszeit | $800 pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | +| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | ~3,000,000 | +| Kosten pro Abfrage | $0 | $0.00004 | +| Infrastructure | Zentralisiert | Dezentralisiert | +| Engineering-Kosten | $200 pro Stunde | Eingeschlossen | +| Geografische Redundanz | $1,200 Gesamtkosten pro zusätzlichem Knoten | Eingeschlossen | +| Betriebszeit | Variiert | 99.9%+ | +| Monatliche Gesamtkosten | $1.650+ | $120 | -## High Volume User (~30M queries per month) +## Benutzer mit hohem Volumen (~30M Abfragen pro Monat) -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | 
$0.00004 | -| Infrastruktur | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Kostenvergleich | Selbst gehostet | The Graph Network | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monatliche Serverkosten\* | $1100 pro Monat, pro Knoten | $0 | +| Abfragekosten | $4000 | $1,200 pro Monat | +| Anzahl der benötigten Knoten | 10 | Nicht anwendbar | +| Entwicklungszeit | $6,000 oder mehr pro Monat | Keine, eingebaut in das Netzwerk mit global verteilten Indexern | +| Abfragen pro Monat | Begrenzt auf infrastrukturelle Funktionen | ~30,000,000 | +| Kosten pro Abfrage | $0 | $0.00004 | +| Infrastructure | Zentralisiert | Dezentralisiert | +| Geografische Redundanz | $1,200 Gesamtkosten pro zusätzlichem Knoten | Eingeschlossen | +| Betriebszeit | Variiert | 99.9%+ | +| Monatliche Gesamtkosten | $11,000+ | $1,200 | -\*including costs for backup: $50-$100 per month +\*einschließlich der Kosten für die Datensicherung: $50-$100 pro Monat -Engineering time based on $200 per hour assumption +Engineering-Zeit auf der Grundlage von 200 $ pro Stunde angenommen -Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. +Reflektiert die Kosten für den Datenkonsumenten. Für Abfragen im Rahmen des „Free Plan“ werden nach wie vor Abfragegebühren an Indexer gezahlt. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. +Die geschätzten Kosten gelten nur für Ethereum Mainnet Subgraphen - die Kosten sind noch höher, wenn man selbst einen `graph-node` in anderen Netzwerken hostet. Einige Nutzer müssen ihren Subgraphen möglicherweise auf eine neue Version aktualisieren. Aufgrund der Ethereum-Gas-Gebühren kostet ein Update zum Zeitpunkt des Schreibens ~$50. Beachten Sie, dass die Gasgebühren auf [Arbitrum](/archived/arbitrum/arbitrum-faq/) wesentlich niedriger sind als im Ethereum Mainnet. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Das Kuratieren von Signalen auf einem Subgraphen ist eine optionale, einmalige Investition mit Netto-Nullkosten (z.B. können Signale im Wert von 1.000 Dollar auf einem Subgraphen kuratiert und später wieder abgezogen werden - mit dem Potenzial, dabei Renditen zu erzielen). -## No Setup Costs & Greater Operational Efficiency +## Keine Einrichtungskosten und größere betriebliche Effizienz -Zero setup fees. Get started immediately with no setup or overhead costs. No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources. +Keine Einrichtungsgebühren. Sie können sofort loslegen, ohne Einrichtungs- oder Gemeinkosten. Keine Hardware-Anforderungen. Keine Ausfälle aufgrund einer zentralisierten Infrastruktur und mehr Zeit, um sich auf Ihr Kernprodukt zu konzentrieren. 
Kein Bedarf an Backup-Servern, Fehlersuche oder teuren technischen Ressourcen.
 
-## Reliability & Resiliency
+## Zuverlässigkeit und Ausfallsicherheit
 
-The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally.
+Das dezentralisierte Netzwerk von The Graph bietet den Nutzern Zugang zu einer geografischen Redundanz, die beim Selbsthosten eines `graph-node` nicht gegeben ist. Abfragen werden dank einer Betriebszeit von über 99,9 %, die durch Hunderte unabhängiger Indexer erreicht wird, die das Netzwerk weltweit sichern, zuverlässig bedient.
 
-Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally.
+Unterm Strich: Das The Graph Network ist kostengünstiger, einfacher zu benutzen und liefert bessere Ergebnisse als ein lokaler `graph-node`.
 
-Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/).
+Beginnen Sie noch heute mit der Nutzung von The Graph Network und erfahren Sie, wie Sie [Ihren Subgraphen im dezentralen Netzwerk von The Graph veröffentlichen](/subgraphs/quick-start/).

From 14183606ef60533ea0e1efdc99becb01f9e50b98 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:51:32 -0500
Subject: [PATCH 0420/1534] New translations benefits.mdx (Italian)

---
 website/src/pages/it/resources/benefits.mdx | 82 ++++++++++-----------
 1 file changed, 41 insertions(+), 41 deletions(-)

diff --git a/website/src/pages/it/resources/benefits.mdx b/website/src/pages/it/resources/benefits.mdx
index e514868affa4..fa7f83dfe600 100644
--- a/website/src/pages/it/resources/benefits.mdx
+++ b/website/src/pages/it/resources/benefits.mdx
@@ -1,11 +1,11 @@
 ---
-title: The Graph Network contro Self Hosting
+title: The Graph vs. Self Hosting
 socialImage: https://thegraph.com/docs/img/seo/benefits.jpg
 ---
 
 La rete decentralizzata di The Graph è stata progettata e perfezionata per creare una solida esperienza di indicizzazione e query, e migliora ogni giorno grazie a migliaia di collaboratori in tutto il mondo.
 
-I vantaggi di questo protocollo decentralizzato non possono essere replicati eseguendo un `graph-node` a livello locale. The Graph Network è più affidabile, più efficiente e meno costosa.
+The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive.
 
 Ecco l'analisi:
 
@@ -19,7 +19,7 @@ Ecco l'analisi:
 
 ## I vantaggi spiegati
 
-### Struttura dei costi più bassa e flessibile
+### Lower & more Flexible Cost Structure
 
 No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card.
@@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Confronto costi | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensile del server\* | $350 al mese | $0 | -| Costi di query | $0+ | $0 per month | -| Tempo di progettazione | $400 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | -| Query al mese | Limitato alle capacità di infra | 100,000 (Free Plan) | -| Costo per query | $0 | $0 | -| Infrastruttura | Centralizzato | Decentralizzato | -| Ridondanza geografica | $750+ per nodo aggiuntivo | Incluso | -| Tempo di attività | Variabile | 99.9%+ | -| Costo totale mensile | $750+ | $0 | +| Confronto costi | Self Hosted | The Graph Network | +| :--------------------------------: | :-------------------------------------: | :---------------------------------------------------------------------------: | +| Costo mensile del server\* | $350 al mese | $0 | +| Costi di query | $0+ | $0 per month | +| Tempo di progettazione | $400 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità di infra | 100,000 (Free Plan) | +| Costo per query | $0 | $0 | +| Infrastructure | Centralizzato | Decentralizzato | +| Ridondanza geografica | $750+ per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Confronto dei costi | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensile del server\* | $350 al mese | $0 | -| Costi di query | $500 al mese | $120 per month | -| Tempo di progettazione | $800 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | -| Query al mese | Limitato alle capacità dell'infrastruttura | ~3,000,000 | -| Costo per query | $0 | $0.00004 | -| Infrastruttura | Centralizzato | Decentralizzato | -| Costi di ingegneria | $200 all'ora | Incluso | -| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | -| Tempo di attività | Variabile | 99.9%+ | -| Costo totale mensile | $1,650+ | $120 | +| Confronto costi | Self Hosted | The Graph Network | +| :--------------------------------: | :----------------------------------------: | :---------------------------------------------------------------------------: | +| Costo mensile del server\* | $350 al mese | $0 | +| Costi di query | $500 al mese | $120 per month | +| Tempo di progettazione | $800 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità di infra | ~3,000,000 | +| Costo per query | $0 | $0.00004 | +| Infrastructure | Centralizzato | Decentralizzato | +| Costi di ingegneria | $200 all'ora | Incluso | +| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Confronto costi | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Costo mensile del server\* | $1100 al mese, per nodo | $0 | -| Costi di query | $4000 | $1,200 per month | -| Numero di nodi necessari | 10 | Non applicabile | -| Tempo di progettazione | $6.000 o più al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | -| Query al mese | Limitato alle capacità di infra | ~30,000,000 | -| Costo 
per query | $0 | $0.00004 | -| Infrastruttura | Centralizzato | Decentralizzato | -| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | -| Tempo di attività | Variabile | 99.9%+ | -| Costo totale mensile | $11,000+ | $1,200 | +| Confronto costi | Self Hosted | The Graph Network | +| :--------------------------------: | :-----------------------------------------: | :---------------------------------------------------------------------------: | +| Costo mensile del server\* | $1100 al mese, per nodo | $0 | +| Costi di query | $4000 | $1,200 per month | +| Numero di nodi necessari | 10 | Non applicabile | +| Tempo di progettazione | $6.000 o più al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità di infra | ~30,000,000 | +| Costo per query | $0 | $0.00004 | +| Infrastructure | Centralizzato | Decentralizzato | +| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $11,000+ | $1,200 | \*inclusi i costi per il backup: $50-$100 al mese @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe La curation del segnale su un subgraph è opzionale, una tantum, a costo zero (ad esempio, $1.000 in segnale possono essere curati su un subgraph e successivamente ritirati, con un potenziale di guadagno nel processo). -## Nessun costo di installazione e maggiore efficienza operativa +## No Setup Costs & Greater Operational Efficiency Zero costi di configurazione. Iniziate subito senza costi di configurazione o spese generali. Nessun requisito hardware. Nessuna interruzione a causa dell'infrastruttura centralizzata, e più tempo per concentrarsi sul prodotto principale. Non sono necessari i server di backup, risoluzione dei problemi o risorse ingegneristiche costose. -## Affidabilità e resilienza +## Reliability & Resiliency The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -In conclusione: The Graph Network è meno costosa, più facile da usare e produce risultati superiori rispetto alla gestione locale di `graph-node`. +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From fa87186f47e7e82a2979d779619eda2a0efa6289 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:33 -0500 Subject: [PATCH 0421/1534] New translations benefits.mdx (Japanese) --- website/src/pages/ja/resources/benefits.mdx | 84 ++++++++++----------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/ja/resources/benefits.mdx b/website/src/pages/ja/resources/benefits.mdx index f9312ab51eeb..c3572b674b7d 100644 --- a/website/src/pages/ja/resources/benefits.mdx +++ b/website/src/pages/ja/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: グラフネットワークとセルフホスティングの比較 +title: The Graph vs. 
Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- Graph の分散型ネットワークは、堅牢なインデックス作成とクエリのエクスペリエンスを作成するために設計および改良されており、世界中の何千人もの貢献者のおかげで、毎日改善されています。 -`graph-node` をローカルで実行しても、この分散型プロトコルの利点を再現することはできません。グラフ ネットワークは、より信頼性が高く、より効率的で、安価です。 +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. ここに分析があります: @@ -19,7 +19,7 @@ Graph の分散型ネットワークは、堅牢なインデックス作成と ## 以下に利点を説明します -### ローランプ、フレキシブルなコスト構造 +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| コスト比較 | セルフホスト | グラフネットワーク | -| :-: | :-: | :-: | -| 月額サーバー代 | $350/月 | $0 | -| クエリコスト | $0+ | $0 per month | -| エンジニアリングタイム | $400/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | -| 月ごとのクエリ | インフラ機能に限定 | 100,000 (Free Plan) | -| クエリごとのコスト | $0 | $0 | -| インフラストラクチャ | 集中管理型 | 分散型 | -| 地理的な冗長性 | 追加1ノードにつき$750+ | 含まれる | -| アップタイム | バリエーション | 99.9%+ | -| 月額費用合計 | $750+ | $0 | +| コスト比較 | セルフホスト | グラフネットワーク | +| :---------------------: | :-------------------------------------: | :---------------------------------: | +| 月額サーバー代 | $350/月 | $0 | +| クエリコスト | $0+ | $0 per month | +| エンジニアリングタイム | $400/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | +| 月ごとのクエリ | インフラ機能に限定 | 100,000 (Free Plan) | +| クエリごとのコスト | $0 | $0 | +| Infrastructure | 集中管理型 | 分散型 | +| 地理的な冗長性 | 追加1ノードにつき$750+ | 含まれる | +| アップタイム | バリエーション | 99.9%+ | +| 月額費用合計 | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| コスト比較 | セルフホスト | グラフネットワーク | -| :-: | :-: | :-: | -| 月額サーバー代 | $350/月 | $0 | -| クエリコスト | $500/月 | $120 per month | -| エンジニアリングタイム | $800/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | -| 月ごとのクエリ | インフラ機能に限定 | ~3,000,000 | -| クエリごとのコスト | $0 | $0.00004 | -| インフラストラクチャ | 中央管理型 | 分散型 | -| エンジニアリングコスト | $200/時 | 含まれる | -| 地理的な冗長性 | ノード追加1台につき合計1,200ドル | 含まれる | -| アップタイム | 変動 | 99.9%+ | -| 月額費用合計 | $1,650+ | $120 | +| コスト比較 | セルフホスト | グラフネットワーク | +| :---------------------: | :----------------------------------------: | :---------------------------------: | +| 月額サーバー代 | $350/月 | $0 | +| クエリコスト | $500/月 | $120 per month | +| エンジニアリングタイム | $800/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | +| 月ごとのクエリ | インフラ機能に限定 | ~3,000,000 | +| クエリごとのコスト | $0 | $0.00004 | +| Infrastructure | 集中管理型 | 分散型 | +| エンジニアリングコスト | $200/時 | 含まれる | +| 地理的な冗長性 | ノード追加1台につき合計1,200ドル | 含まれる | +| アップタイム | バリエーション | 99.9%+ | +| 月額費用合計 | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| コスト比較 | セルフホスト | グラフネットワーク | -| :-: | :-: | :-: | -| 月額サーバー代 | $1100/月(ノードごと) | $0 | -| クエリコスト | $4000 | $1,200 per month | -| 必要ノード数 | 10 | 該当なし | -| エンジニアリングタイム | $6,000/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | -| 月ごとのクエリ | インフラ機能に限定 | ~30,000,000 | -| クエリごとのコスト | $0 | $0.00004 | -| インフラストラクチャ | 集中管理型 | 分散型 | -| 地理的な冗長性 | ノード追加1台につき合計1,200ドル | 含まれる | -| アップタイム | 変動 | 99.9%+ | -| 月額費用合計 | $11,000+ | $1,200 | +| コスト比較 | セルフホスト | グラフネットワーク | +| :---------------------: | :-----------------------------------------: | :---------------------------------: | +| 月額サーバー代 | $1100/月(ノードごと) | $0 | +| クエリコスト | $4000 | $1,200 per month | +| 必要ノード数 | 10 | 該当なし | +| エンジニアリングタイム | $6,000/月 | なし/ グローバルに分散されたインデクサーでネットワークに組み込まれる | +| 月ごとのクエリ | インフラ機能に限定 | ~30,000,000 | +| 
クエリごとのコスト | $0 | $0.00004 | +| Infrastructure | 集中管理型 | 分散型 | +| 地理的な冗長性 | ノード追加1台につき合計1,200ドル | 含まれる | +| アップタイム | バリエーション | 99.9%+ | +| 月額費用合計 | $11,000+ | $1,200 | \*バックアップ費用含む:月額$50〜$100 @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe サブグラフ上のシグナルのキュレーションは、オプションで1回限り、ネットゼロのコストで可能です(例えば、$1,000のシグナルをサブグラフ上でキュレーションし、後で引き出すことができ、その過程でリターンを得る可能性があります)。 -## セットアップ費用不要; 運用効率アップ。 +## No Setup Costs & Greater Operational Efficiency セットアップ費用ゼロ。セットアップや諸経費が不要で、すぐに使い始めることができます。ハードウェアは必要ありません。集中管理されたインフラストラクチャによる障害もなく、より多くの時間をコア製品に集中させることができます。バックアップサーバー、トラブルシューティング、高価なエンジニアリングリソースは必要ありません。 -## 信頼性と信頼性回復力 +## Reliability & Resiliency -Graphの分散型ネットワークは、`graph-node`をセルフホストする場合には存在しない地理的な冗長性をユーザーに提供します。何百もの独立したIndexerがグローバルにネットワークを保護することで、クエリは99.9%以上のアップタイムで確実に提供されます。 +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -結論: グラフ ネットワークは、ローカルで `graph-node` を実行するよりも安価で使いやすく、優れた結果を生み出します。 +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 6947bf7129f4b8867bad9d14457824a0e3affb7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:34 -0500 Subject: [PATCH 0422/1534] New translations benefits.mdx (Korean) --- website/src/pages/ko/resources/benefits.mdx | 72 ++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/ko/resources/benefits.mdx b/website/src/pages/ko/resources/benefits.mdx index c4539171103b..1d4d1ac5ff96 100644 --- a/website/src/pages/ko/resources/benefits.mdx +++ b/website/src/pages/ko/resources/benefits.mdx @@ -1,5 +1,5 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph vs. 
Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | The Graph 네트워크 | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | The Graph 네트워크 | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | The Graph 네트워크 | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | The Graph 네트워크 | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | The Graph 네트워크 | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | 
Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | The Graph 네트워크 | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month From 01f1720edb5392975b098ba1b8500e2ed4ae49e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:36 -0500 Subject: [PATCH 0423/1534] New translations benefits.mdx (Dutch) --- website/src/pages/nl/resources/benefits.mdx | 84 ++++++++++----------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/nl/resources/benefits.mdx b/website/src/pages/nl/resources/benefits.mdx index 6e0dff48b863..10d8dde52b25 100644 --- a/website/src/pages/nl/resources/benefits.mdx +++ b/website/src/pages/nl/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: Het Graph Netwerk vs. Zelf Hosten +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- Het gedecentraliseerde netwerk van The Graph is ontworpen en verfijnd om een robuuste ervaring te creëren bij het indexeren en opvragen van data. Het netwerk wordt iedere dag sterker door de duizenden bijdragers wereldwijd. -De voordelen van dit gedecentraliseerde protocol is dat het niet gerepliceerd kan worden door een `graph-node` lokaal te laten werken. Het Graph Netwerk is betrouwbaarder, efficiënter en goedkoper. +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. Hier is een analyse: @@ -19,7 +19,7 @@ Hier is een analyse: ## De voordelen uitgelegd -### Lagere & meer flexibele kostenstructuur +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. 
@@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Kostenvergelijking | Zelf hosten | De Graph Netwerk | -| :-: | :-: | :-: | -| Maandelijkse serverkosten | $350 per maand | $0 | -| Querykosten | $0+ | $0 per month | -| Onderhoud tijd | $400 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | -| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | 100,000 (Free Plan) | -| Kosten per query | $0 | $0 | -| Infrastructuur | Gecentraliseerd | Gedecentraliseerd | -| Geografische redundantie | $750+ per extra node | Inbegrepen | -| Uptime | Wisselend | 99,9%+ | -| Totale maandelijkse kosten | $750+ | $0 | +| Kostenvergelijking | Zelf hosten | De Graph Netwerk | +| :------------------------: | :-------------------------------------: | :----------------------------------------------------------------------------------------------: | +| Maandelijkse serverkosten | $350 per maand | $0 | +| Querykosten | $0+ | $0 per month | +| Onderhoud tijd | $400 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | 100,000 (Free Plan) | +| Kosten per query | $0 | $0 | +| Infrastructure | Gecentraliseerd | Gedecentraliseerd | +| Geografische redundantie | $750+ per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Kostenvergelijking | Zelf hosten | De Graph Netwerk | -| :-: | :-: | :-: | -| Maandelijkse serverkosten | $350 per maand | $0 | -| Querykosten | $500 per maand | $120 per month | -| Onderhoud | $800 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | -| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | ~3,000,000 | -| Kosten per query | $0 | $0.00004 | -| Infrastructuur | Gecentraliseerd | Gedecentraliseerd | -| Technische personeelskosten | $200 per uur | Inbegrepen | -| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | -| Uptime | Wisselend | 99,9%+ | -| Totale maandelijkse kosten | $1650+ | $120 | +| Kostenvergelijking | Zelf hosten | De Graph Netwerk | +| :-------------------------: | :----------------------------------------: | :----------------------------------------------------------------------------------------------: | +| Maandelijkse serverkosten | $350 per maand | $0 | +| Querykosten | $500 per maand | $120 per month | +| Onderhoud tijd | $800 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | ~3,000,000 | +| Kosten per query | $0 | $0.00004 | +| Infrastructure | Gecentraliseerd | Gedecentraliseerd | +| Technische personeelskosten | $200 per uur | Inbegrepen | +| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Kostenvergelijking | Zelf hosten | De Graph Netwerk | -| :-: | :-: | :-: | -| Maandelijkse serverkosten | $1100 per maand, per node | $0 | -| Querykosten | $4000 | $1,200 per month | -| Aantal benodigde nodes | 10 | Niet van toepassing | -| Onderhoud | $6000 of meer per maand | Geen, deze 
kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | -| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | ~30,000,000 | -| Kosten per query | $0 | $0.00004 | -| Infrastructuur | Gecentraliseerd | Gedecentraliseerd | -| Geografische redundantie | $1200 in totale kosten per extra node | Inbegrepen | -| Uptime | Wisselend | 99,9%+ | -| Totale maandelijkse kosten | $11000+ | $1,200 | +| Kostenvergelijking | Zelf hosten | De Graph Netwerk | +| :------------------------: | :-----------------------------------------: | :----------------------------------------------------------------------------------------------: | +| Maandelijkse serverkosten | $1100 per maand, per node | $0 | +| Querykosten | $4000 | $1,200 per month | +| Aantal benodigde nodes | 10 | Niet van toepassing | +| Onderhoud tijd | $6000 of meer per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | ~30,000,000 | +| Kosten per query | $0 | $0.00004 | +| Infrastructure | Gecentraliseerd | Gedecentraliseerd | +| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $11,000+ | $1,200 | \*inclusief kosten voor een back-up: $50-$100 per maand @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe Signaal cureren op een subgraph is een optionele eenmalige, kostenneutrale actie (bijv. $1000 aan signaal kan worden gecureerd op een subgraph en later worden opgenomen - met het potentieel om rendementen te verdienen tijdens het proces). -## Geen voorafgaande kosten & grotere operationele efficiëntie +## No Setup Costs & Greater Operational Efficiency Geen voorafgaande kosten. Begin direct zonder voorafgaande- of overheadkosten. Geen hardwarevereisten. Geen uitval door gecentraliseerde infrastructuur, en meer tijd om je te concentreren op je kernproduct. Geen noodzaak voor back-up servers, probleemoplossing of dure engineeringtijd. -## Betrouwbaarheid & Veerkrachtigheid +## Reliability & Resiliency -Het gedecentraliseerde netwerk van The Graph biedt gebruikers toegang tot geografische redundantie die niet bestaat bij het zelf hosten van een `graph-node`. Query's worden betrouwbaar verwerkt dankzij een uptime van 99.9%+, behaald door honderden onafhankelijke Indexeerders die het netwerk wereldwijd beveiligen. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -Samenvattend: Het Graph Network is goedkoper, gemakkelijker te gebruiken en levert superieure resultaten in vergelijking met het lokaal hosten van een `graph-node`. +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
From 567c949d12ffa12bb7c969d8f766922c829e5f3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:37 -0500 Subject: [PATCH 0424/1534] New translations benefits.mdx (Polish) --- website/src/pages/pl/resources/benefits.mdx | 72 ++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/pl/resources/benefits.mdx b/website/src/pages/pl/resources/benefits.mdx index da704fd8db8d..8242856b35ec 100644 --- a/website/src/pages/pl/resources/benefits.mdx +++ b/website/src/pages/pl/resources/benefits.mdx @@ -1,5 +1,5 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | Sieć The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | Sieć The Graph | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | Sieć The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | Sieć The Graph | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total 
Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | Sieć The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | Sieć The Graph | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month From 17a23b71045c6786fa344f1e6437863451b530a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:38 -0500 Subject: [PATCH 0425/1534] New translations benefits.mdx (Portuguese) --- website/src/pages/pt/resources/benefits.mdx | 89 ++++++++++----------- 1 file changed, 44 insertions(+), 45 deletions(-) diff --git a/website/src/pages/pt/resources/benefits.mdx b/website/src/pages/pt/resources/benefits.mdx index b632c3496de5..1c3543394160 100644 --- a/website/src/pages/pt/resources/benefits.mdx +++ b/website/src/pages/pt/resources/benefits.mdx @@ -5,7 +5,7 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg A rede descentralizada do The Graph é construída e refinada para criar uma experiência robusta de indexação e consultas—e ela melhora diariamente, graças a milhares de contribuintes ao redor do mundo. -Os benefícios deste protocolo descentralizado não podem ser replicados ao executar um `graph-node` localmente. A Graph Network é mais confiável, mais eficiente, e mais barata. +As vantagens deste protocolo descentralizado não podem ser replicadas com a execução local de um `graph-node`. A Graph Network é mais estável, mais eficiente, e mais barata. Aqui está uma análise: @@ -19,7 +19,7 @@ Aqui está uma análise: ## Os Benefícios -### Estrutura Mais Flexível & Custo Menor +### Estrutura Mais Flexível e Custo Menor Nada de contratos ou taxas mensais. Só pague pelos queries que usa — com um custo-por-query de $40 dólares por milhão de queries (cerca de $0.0004 por query). Estes tem preços em Dólar (USD), pagos em GRT ou cartão de crédito. 
@@ -27,67 +27,66 @@ Os custos de query podem variar; o custo citado é o normal até o fechamento da ## Utilizador de Baixo Volume (menos de 100 mil queries por mês) -| Comparação de Custos | Auto-hospedagem | The Graph Network | -| :-: | :-: | :-: | -| Custo mensal de servidor\* | $350 por mês | $0 | -| Custos de query | $0+ | $0 por mês | -| Tempo de engenharia | $400 por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | -| Queries por mês | Limitadas pelas capabilidades da infra | 100 mil (Plano Grátis) | -| Custo por query | $0 | $0 | -| Infraestrutura | Centralizada | Descentralizada | -| Redundância geográfica | $750+ por node adicional | Incluída | -| Uptime (disponibilidade) | Varia | 99.9%+ | -| Custos mensais totais | $750+ | $0 | +| Comparação de Custos | Auto-hospedagem | The Graph Network | +| :-----------------------------: | :-------------------------------------: | :---------------------------------------------------------------: | +| Custo mensal de servidor\* | $350 por mês | $0 | +| Custos de query | $0+ | $0 por mês | +| Tempo de engenharia | $400 por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | +| Queries por mês | Limitadas pelas capabilidades da infra | 100 mil (Plano Grátis) | +| Custo por query | $0 | $0 | +| Infrastructure | Centralizada | Descentralizada | +| Redundância geográfica | $750+ por node adicional | Incluída | +| Uptime (disponibilidade) | Varia | 99.9%+ | +| Custos mensais totais | $750+ | $0 | ## Utilizador de Volume Médio (cerca de 3 milhões de queries por mês) -| Comparação de Custos | Auto-hospedagem | The Graph Network | -| :-: | :-: | :-: | -| Custo mensal de servidor\* | $350 por mês | $0 | -| Custos de query | $500 por mês | $120 por mês | -| Tempo de engenharia | $800 por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | -| Queries por mês | Limitadas pelas capabilidades da infra | ~3 milhões | -| Custo por query | $0 | $0.00004 | -| Infraestrutura | Centralizada | Descentralizada | -| Custo de engenharia | $200 por hora | Incluído | -| Redundância geográfica | $1.200 em custos totais por node adicional | Incluída | -| Uptime (disponibilidade) | Varia | 99.9%+ | -| Custos mensais totais | $1.650+ | $120 | +| Comparação de Custos | Auto-hospedagem | The Graph Network | +| :-----------------------------: | :----------------------------------------: | :---------------------------------------------------------------: | +| Custo mensal de servidor\* | $350 por mês | $0 | +| Custos de query | $500 por mês | $120 por mês | +| Tempo de engenharia | $800 por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | +| Queries por mês | Limitadas pelas capabilidades da infra | ~3 milhões | +| Custo por query | $0 | $0.00004 | +| Infrastructure | Centralizada | Descentralizada | +| Custo de engenharia | $200 por hora | Incluída | +| Redundância geográfica | $1.200 em custos totais por node adicional | Incluída | +| Uptime (disponibilidade) | Varia | 99.9%+ | +| Custos mensais totais | $1.650+ | $120 | ## Utilizador de Volume Alto (cerca de 30 milhões de queries por mês) -| Comparação de Custos | Auto-hospedagem | The Graph Network | -| :-: | :-: | :-: | -| Custo mensal de servidor\* | $1.100 por mês, por node | $0 | -| Custos de query | $4.000 | $1,200 por mês | -| Número de nodes necessário | 10 | Não se aplica | -| Tempo de engenharia | $6.000 ou mais por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | -| Queries por mês | Limitadas 
pelas capabilidades da infra | Cerca de 30 milhões | -| Custo por query | $0 | $0.00004 | -| Infraestrutura | Centralizada | Descentralizada | -| Redundância geográfica | $1.200 em custos totais por node adicional | Incluída | -| Uptime (disponibilidade) | Varia | 99.9%+ | -| Custos mensais totais | $11.000+ | $1.200 | +| Comparação de Custos | Auto-hospedagem | The Graph Network | +| :-----------------------------: | :-----------------------------------------: | :---------------------------------------------------------------: | +| Custo mensal de servidor\* | $1.100 por mês, por node | $0 | +| Custos de query | $4.000 | $1,200 por mês | +| Número de nodes necessário | 10 | Não se aplica | +| Tempo de engenharia | $6.000 ou mais por mês | Nenhum, embutido na rede com Indexadores distribuídos globalmente | +| Queries por mês | Limitadas pelas capabilidades da infra | Cerca de 30 milhões | +| Custo por query | $0 | $0.00004 | +| Infrastructure | Centralizada | Descentralizada | +| Redundância geográfica | $1.200 em custos totais por node adicional | Incluída | +| Uptime (disponibilidade) | Varia | 99.9%+ | +| Custos mensais totais | $11.000+ | $1.200 | \*com custos de backup incluídos: $50-$100 por mês Tempo de engenharia baseado numa hipótese de $200 por hora -Reflete o custo ao consumidor de dados. Taxas de query ainda são pagas a Indexadores por queries do Plano -Grátis. +Reflete o custo ao consumidor de dados. Taxas de query ainda são pagas a Indexadores por queries do Plano Grátis. -Os custos estimados são apenas para subgraphs na Mainnet do Ethereum — os custos são maiores ao auto-hospedar um `graph-node` em outras redes. Alguns utilizadores devem atualizar o seu subgraph a uma versão mais recente. Até o fechamento deste texto, devido às taxas de gas do Ethereum, uma atualização custa cerca de 50 dólares. Note que as taxas de gás no [Arbitrum](/archived/arbitrum/arbitrum-faq/) são muito menores que as da mainnet do Ethereum. +Os custos estimados são apenas para subgraphs na Mainnet do Ethereum — os custos são maiores ao auto-hospedar um graph-node em outras redes. Alguns utilizadores devem atualizar o seu subgraph a uma versão mais recente. Até o fechamento deste texto, devido às taxas de gas do Ethereum, uma atualização custa cerca de 50 dólares. Note que as taxas de gás no [Arbitrum](/archived/arbitrum/arbitrum-faq/) são muito menores que as da mainnet do Ethereum. Curar um sinal em um subgraph é um custo opcional, único, e zero-líquido (por ex., $1 mil em um subgraph pode ser curado em um subgraph, e depois retirado — com potencial para ganhar retornos no processo). -## Zero Custos de Preparação & Eficiência Operacional Maior +## Zero Custos de Preparação e Mais Eficiência Operacional Zero taxas de setup. Comece imediatamente, sem custos de setup ou gerais. Nenhum requisito de hardware. Zero falhas por infraestruturas centralizadas, e mais tempo para se concentrar no seu produto principal. Não há necessidade de servidores de backup, solução de problemas ou recursos caríssimos de engenharia. -## Confiabilidade & Resiliência +## Confiabilidade e Resiliência -A rede descentralizada do The Graph permite que os utilizadores acessem redundâncias geográficas que não existem ao auto-hospedar um `graph-node`. Os queries são servidos de maneira confiável graças ao uptime de mais de 99.9%, alcançado por centenas de Indexadores independentes que protegem a rede globalmente. 
+A rede descentralizada do The Graph permite que os utilizadores acessem redundâncias geográficas que não existem ao auto-hospedar um graph-node. Os queries são servidos de maneira confiável graças ao uptime de mais de 99.9%, alcançado por centenas de Indexadores independentes que protegem a rede globalmente.
 
-Enfim: A Graph Network é mais barata e fácil de usar, e produz resultados superiores comparados à execução local de um `graph-node`.
+Enfim: A Graph Network é mais barata e fácil de usar, e produz resultados melhores comparados à execução local de um graph-node.
 
-Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/).
+Comece a usar a Graph Network hoje, e aprenda como [editar o seu subgraph na rede descentralizada do The Graph](/subgraphs/quick-start/).

From 6ca7f011a81482d17e5b59949f0ec1c21aaa3b7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:51:39 -0500
Subject: [PATCH 0426/1534] New translations benefits.mdx (Russian)

---
 website/src/pages/ru/resources/benefits.mdx | 94 ++++++++++-----------
 1 file changed, 47 insertions(+), 47 deletions(-)

diff --git a/website/src/pages/ru/resources/benefits.mdx b/website/src/pages/ru/resources/benefits.mdx
index 761f08267b6a..799390c31b89 100644
--- a/website/src/pages/ru/resources/benefits.mdx
+++ b/website/src/pages/ru/resources/benefits.mdx
@@ -1,25 +1,25 @@
 ---
-title: Сеть The Graph по сравнению с Self Hosting
+title: The Graph vs. Self Hosting
 socialImage: https://thegraph.com/docs/img/seo/benefits.jpg
 ---
 
-Децентрализованная сеть Graph была спроектирована и усовершенствована для создания надежной системы индексации и запросов — и с каждым днем она становится лучше благодаря тысячам участников по всему миру.
+Децентрализованная сеть The Graph была спроектирована и усовершенствована для создания надежной системы индексации и запросов — и с каждым днем она становится лучше благодаря тысячам участников по всему миру.
 
-Преимущества этого децентрализованного протокола не могут быть воспроизведены путем локального запуска `graph-node`. Сеть The Graph более надежна, эффективна и менее затратна.
+The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive.
 
 Вот анализ:
 
-## Почему Вы должны использовать сеть The Graph
+## Почему Вы должны использовать The Graph Network
 
 - Significantly lower monthly costs
-- затраты на установку инфраструктуры в размере $0
-- Превосходное время безотказной работы
-- Access to hundreds of independent Indexers around the world
+- Затраты на настройку инфраструктуры в размере $0
+- Оптимальное время безотказной работы
+- Доступ к сотням независимых Индексаторов по всему миру
 - 24/7 техническая поддержка со стороны глобального сообщества
 
 ## Преимущества
 
-### Более низкая& и гибкая структура затрат
+### Lower & more Flexible Cost Structure
 
 No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card.
@@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | -| :-: | :-: | :-: | -| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | -| Стоимость запроса | $0+ | $0 per month | -| Время разработки | $400 в месяц | Никто, встроен в сеть с глобально распределенными индексаторами | -| Запросы в месяц | Ограничен возможностями инфраструктуры | 100,000 (Free Plan) | -| Стоимость одного запроса | $0 | $0 | -| Инфраструктура | Централизованная | Децентрализованная | -| Географическая избыточность | $750+ за каждую дополнительную ноду | Включено | -| Время безотказной работы | Варьируется | 99.9%+ | -| Общие ежемесячные расходы | $750+ | $0 | +| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | +| :-----------------------------: | :-------------------------------------: | :-----------------------------------------------------------: | +| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | +| Стоимость запроса | $0+ | $0 per month | +| Время разработки | $400 в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | 100,000 (Free Plan) | +| Стоимость одного запроса | $0 | $0 | +| Infrastructure | Централизованная | Децентрализованная | +| Географическая избыточность | $750+ за каждую дополнительную ноду | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | -| :-: | :-: | :-: | -| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | -| Стоимость запроса | $500 в месяц | $120 per month | -| Время разработки | $800 в месяц | Никто, встроен в сеть с глобально распределенными индексаторами | -| Запросы в месяц | Ограничен возможностями инфраструктуры | ~3,000,000 | -| Стоимость одного запроса | $0 | $0.00004 | -| Инфраструктура | Централизованная | Децентрализованная | -| Инженерные расходы | $200 в час | Включено | -| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | -| Время безотказной работы | Варьируется | 99.9%+ | -| Общие ежемесячные расходы | $1,650+ | $120 | +| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | +| :-----------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | +| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | +| Стоимость запроса | $500 в месяц | $120 per month | +| Время разработки | $800 в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | ~3,000,000 | +| Стоимость одного запроса | $0 | $0.00004 | +| Infrastructure | Централизованная | Децентрализованная | +| Инженерные расходы | $200 в час | Включено | +| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | -| :-: | :-: | :-: | -| Ежемесячная стоимость сервера\* | $1100 в месяц за ноду | $0 | -| Стоимость запроса | $4000 | $1,200 per month | -| Необходимое количество нод | 10 | Не подходит | -| 
Время разработки | $6,000 или больше в месяц | Никто, встроен в сеть с глобально распределенными индексаторами | -| Запросы в месяц | Ограничен возможностями инфраструктуры | ~30,000,000 | -| Стоимость одного запроса | $0 | $0.00004 | -| Инфраструктура | Централизованная | Децентрализованная | -| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | -| Время безотказной работы | Варьируется | 99.9%+ | -| Общие ежемесячные расходы | $11,000+ | $1,200 | +| Сравнение затрат | Самостоятельный хостинг | Сеть The Graph | +| :-----------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | +| Ежемесячная стоимость сервера\* | $1100 в месяц за ноду | $0 | +| Стоимость запроса | $4000 | $1,200 per month | +| Необходимое количество нод | 10 | Не подходит | +| Время разработки | $6,000 или больше в месяц | Нет, встроен в сеть с глобально распределенными Индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | ~30,000,000 | +| Стоимость одного запроса | $0 | $0.00004 | +| Infrastructure | Централизованная | Децентрализованная | +| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $11,000+ | $1,200 | \* включая расходы на резервное копирование: $50-$100 в месяц @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe Курирование сигнала на субграфе - это необязательная единовременная стоимость, равная нулю (например, сигнал стоимостью 1 тыс. долларов может быть курирован на субграфе, а затем отозван - с возможностью получения прибыли в процессе). -## Отсутствие затрат на настройку & повышение операционной эффективности +## No Setup Costs & Greater Operational Efficiency -Нулевая плата за установку. Приступайте к работе немедленно, без каких-либо затрат на настройку или накладные расходы. Никаких требований к оборудованию. Отсутствие перебоев в работе из-за централизованной инфраструктуры и больше времени для концентрации на вашем основном продукте. Нет необходимости в резервных серверах, устранении неполадок или дорогостоящих инженерных ресурсах. +Нулевая плата за установку. Приступайте к работе немедленно, без каких-либо затрат на настройку или накладные расходы. Никаких требований к оборудованию. Отсутствие перебоев в работе из-за централизованной инфраструктуры и больше времени для концентрации на Вашем основном продукте. Нет необходимости в резервных серверах, устранении неполадок или дорогостоящих инженерных ресурсах. -## Надежность & отказоустойчивость +## Reliability & Resiliency The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -Итог: Сеть Graph дешевле, проще в использовании и дает превосходные результаты по сравнению с запуском `graph-node` локально. +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
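The pricing repeated across these benefits.mdx tables is internally consistent. Below is a minimal Python sketch of the arithmetic, using only figures quoted on the pages themselves (the $40-per-million query rate, the 100,000-query Free Plan, and the ~3M and ~30M monthly tiers); the variable names are illustrative.

```python
# Check the query-cost figures quoted in the benefits.mdx comparison tables.
cost_per_million_usd = 40.0                       # "$40 per million queries"
cost_per_query = cost_per_million_usd / 1_000_000
assert abs(cost_per_query - 0.00004) < 1e-12      # "~$0.00004 per query"

tiers = {"low": 100_000, "medium": 3_000_000, "high": 30_000_000}
for name, queries in tiers.items():
    print(f"{name}: ${queries * cost_per_query:,.0f} per month")
# low: $4 per month      (within the 100,000-query Free Plan, so billed $0)
# medium: $120 per month (matches the "$120 per month" row)
# high: $1,200 per month (matches the "$1,200 per month" row)
```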
From 7c0cd0e2b134ed05a4b9cc036dc6d730eac2a9fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:40 -0500 Subject: [PATCH 0427/1534] New translations benefits.mdx (Swedish) --- website/src/pages/sv/resources/benefits.mdx | 84 ++++++++++----------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/sv/resources/benefits.mdx b/website/src/pages/sv/resources/benefits.mdx index 1fe3ee664fd5..a1d5a93f4471 100644 --- a/website/src/pages/sv/resources/benefits.mdx +++ b/website/src/pages/sv/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: The Graf Nätverk vs. Egen Värd +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- The Graphs decentraliserade nätverk har utformats och finslipats för att skapa en robust indexering och frågeupplevelse - och det blir bättre för varje dag tack vare tusentals bidragsgivare runt om i världen. -Fördelarna med detta decentraliserade protokoll kan inte replikeras genom att köra en `graph-node` lokalt. The Graph Nätverk är mer pålitligt, mer effektivt och mindre kostsamt. +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. Här är en analys: @@ -19,7 +19,7 @@ Här är en analys: ## Fördelarna förklarade -### Lägre & Mer Flexibel Kostnadsstruktur +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | -| :-: | :-: | :-: | -| Månatlig kostnad för server\* | $350 per månad | $0 | -| Kostnad för frågor | $0+ | $0 per month | -| Konstruktionstid | $400 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | -| Frågor per månad | Begränsad till infra kapacitet | 100,000 (Free Plan) | -| Kostnad per fråga | $0 | $0 | -| Infrastruktur | Centraliserad | Decentraliserad | -| Geografisk redundans | $750+ per extra nod | Inkluderat | -| Drifttid | Varierande | 99.9%+ | -| Total Månadskostnad | $750+ | $0 | +| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | +| :---------------------------: | :-------------------------------------: | :-----------------------------------------------------------: | +| Månatlig kostnad för server\* | $350 per månad | $0 | +| Kostnad för frågor | $0+ | $0 per month | +| Konstruktionstid | $400 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | 100,000 (Free Plan) | +| Kostnad per fråga | $0 | $0 | +| Infrastructure | Centraliserad | Decentraliserad | +| Geografisk redundans | $750+ per extra nod | Inkluderat | +| Drifttid | Varierande | 99.9%+ | +| Total Månadskostnad | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | -| :-: | :-: | :-: | -| Månadskostnad för server\* | $350 per månad | $0 | -| Kostnad för frågor | $500 per månad | $120 per month | -| Ingenjörstid | $800 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | -| Frågor per månad | Begränsad till infra kapacitet | ~3,000,000 | -| Kostnad per fråga | $0 | $0.00004 | -| 
Infrastruktur | Centraliserad | Decentraliserad | -| Kostnader för ingenjörsarbete | $200 per timme | Inkluderat | -| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | -| Drifttid | Varierar | 99.9%+ | -| Total Månadskostnad | $1,650+ | $120 | +| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | +| :---------------------------: | :----------------------------------------: | :-----------------------------------------------------------: | +| Månatlig kostnad för server\* | $350 per månad | $0 | +| Kostnad för frågor | $500 per månad | $120 per month | +| Konstruktionstid | $800 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | ~3,000,000 | +| Kostnad per fråga | $0 | $0.00004 | +| Infrastructure | Centraliserad | Decentraliserad | +| Kostnader för ingenjörsarbete | $200 per timme | Inkluderat | +| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | +| Drifttid | Varierande | 99.9%+ | +| Total Månadskostnad | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | -| :-: | :-: | :-: | -| Månadskostnad för server\* | $1100 per månad, per nod | $0 | -| Kostnad för frågor | $4000 | $1,200 per month | -| Antal noder som behövs | 10 | Ej tillämpligt | -| Ingenjörstid | $6,000 eller mer per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | -| Frågor per månad | Begränsad till infra kapacitet | ~30,000,000 | -| Kostnad per fråga | $0 | $0.00004 | -| Infrastruktur | Centraliserad | Decentraliserad | -| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | -| Drifttid | Varierar | 99.9%+ | -| Total Månadskostnad | $11,000+ | $1,200 | +| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | +| :---------------------------: | :-----------------------------------------: | :-----------------------------------------------------------: | +| Månatlig kostnad för server\* | $1100 per månad, per nod | $0 | +| Kostnad för frågor | $4000 | $1,200 per month | +| Antal noder som behövs | 10 | Ej tillämpligt | +| Konstruktionstid | $6,000 eller mer per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | ~30,000,000 | +| Kostnad per fråga | $0 | $0.00004 | +| Infrastructure | Centraliserad | Decentraliserad | +| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | +| Drifttid | Varierande | 99.9%+ | +| Total Månadskostnad | $11,000+ | $1,200 | \*inklusive kostnader för backup: $50-$100 per månad @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe Att kurera signal på en subgraf är en valfri engångskostnad med noll nettokostnad (t.ex. $1k i signal kan kurera på en subgraf och senare dras tillbaka - med potential att tjäna avkastning i processen). -## Inga Installationskostnader & Ökad Driftseffektivitet +## No Setup Costs & Greater Operational Efficiency Inga installationsavgifter. Kom igång direkt utan installations- eller overheadkostnader. Inga hårdvarukrav. Inga avbrott på grund av centraliserad infrastruktur och mer tid att fokusera på din kärnprodukt. Ingen nödvändighet för backup-servrar, felsökning eller dyra ingenjörsresurser. 
-## Tillförlitlighet & Motståndskraft +## Reliability & Resiliency -The Graphs decentraliserade nätverk ger användare tillgång till geografisk redundans som inte existerar när man själv-hostar en `graph-node`. Förfrågningar betjänas pålitligt tack vare en drifttid på 99,9% eller mer, uppnådd av hundratals oberoende Indexers som säkrar nätverket globalt. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -Sammanfattningsvis är The Graph Nätverk mindre kostsamt, enklare att använda och ger överlägsna resultat jämfört med att köra en `graph-node` lokalt. +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From a1c899acf28c92f9a69de8c9e890a34c8ad2546c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:41 -0500 Subject: [PATCH 0428/1534] New translations benefits.mdx (Turkish) --- website/src/pages/tr/resources/benefits.mdx | 74 ++++++++++----------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/tr/resources/benefits.mdx b/website/src/pages/tr/resources/benefits.mdx index 0d2a68affb02..e752013bb83b 100644 --- a/website/src/pages/tr/resources/benefits.mdx +++ b/website/src/pages/tr/resources/benefits.mdx @@ -1,5 +1,5 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | Graph Ağı | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Altyapı | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | Graph Ağı | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | Graph Ağı | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 
| -| Cost per query | $0 | $0.00004 | -| Altyapı | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | Graph Ağı | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | Graph Ağı | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Altyapı | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | Graph Ağı | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month @@ -85,7 +85,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -Graph'ın merkeziyetsiz ağı, kullanıcılara bir `graph node`'unu kendi kendine barındırırken sahip olmadıkları coğrafi yedeklemeye de erişim sağlar. Ağın küresel güvenliğini sağlayan yüzlerce bağımsız indeksleyici tarafından ulaşılan %99,9+ çalışma süresi sayesinde sorgular güvenilir bir şekilde sunulur. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. 
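The self-hosted totals in these comparison tables are sums of their own line items. A small sketch of that check, assuming only the line items as stated (server, query, and engineering costs); the trailing "+" in each quoted total covers the backup and other costs the pages list separately.

```python
# Recompute the "Total Monthly Costs" column for the self-hosted side.
low    = 350 + 0 + 400        # server + query costs + engineering time
medium = 350 + 500 + 800
high   = 10 * 1_100           # ten nodes at $1,100 per month each

assert low == 750             # table: "$750+"
assert medium == 1_650        # table: "$1,650+"
assert high == 11_000         # table: "$11,000+" (query and engineering costs add to this)
```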
From 9c43dbcde91a2230c71f6945a08670658e5da0e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:42 -0500 Subject: [PATCH 0429/1534] New translations benefits.mdx (Ukrainian) --- website/src/pages/uk/resources/benefits.mdx | 78 ++++++++++----------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/website/src/pages/uk/resources/benefits.mdx b/website/src/pages/uk/resources/benefits.mdx index 39f77a7a7d71..a4cd2c2f0392 100644 --- a/website/src/pages/uk/resources/benefits.mdx +++ b/website/src/pages/uk/resources/benefits.mdx @@ -1,5 +1,5 @@ --- -title: Мережа The Graph в порівнянні з Самостійним хостингом +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- @@ -19,7 +19,7 @@ Here is an analysis: ## The Benefits Explained -### Нижча & більш Гнучка структура витрат +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | -| :-: | :-: | :-: | -| Щомісячна плата за сервер\* | $350 на місяць | $0 | -| Вартість запитів | $0+ | $0 per month | -| Час технічного обслуговування | $400 на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | -| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | 100,000 (Free Plan) | -| Вартість одного запиту | $0 | $0 | -| Інфраструктура | Централізована | Децентралізована | -| Географічне резервування | $750+ за кожну додаткову ноду | Включено | -| Час безвідмовної роботи | Варіюється | 99.9%+ | -| Загальна сума щомісячних витрат | $750+ | $0 | +| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | +| :---------------------------------------: | :----------------------------------------: | :---------------------------------------------------------------: | +| Щомісячна плата за сервер\* | $350 на місяць | $0 | +| Вартість запитів | $0+ | $0 per month | +| Час технічного обслуговування | $400 на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | +| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | 100,000 (Free Plan) | +| Вартість одного запиту | $0 | $0 | +| Infrastructure | Централізована | Децентралізована | +| Географічне резервування | $750+ за кожну додаткову ноду | Включено | +| Час безвідмовної роботи | Варіюється | 99.9%+ | +| Загальна сума щомісячних витрат | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | -| :-: | :-: | :-: | -| Щомісячна плата за сервер\* | $350 на місяць | $0 | -| Вартість запитів | $500 на місяць | $120 per month | -| Час технічного обслуговування | $800 на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | -| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | ~3,000,000 | -| Вартість одного запиту | $0 | $0.00004 | -| Інфраструктура | Централізована | Децентралізована | -| Інженерно-технічні витрати | $200 на годину | Включено | -| Географічне резервування | $1,200 загальних витрат на кожну додаткову ноду | Включено | -| Час безвідмовної роботи | Варіюється | 99.9%+ | -| Загальна сума 
щомісячних витрат | $1,650+ | $120 | +| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | +| :---------------------------------------: | :---------------------------------------------: | :---------------------------------------------------------------: | +| Щомісячна плата за сервер\* | $350 на місяць | $0 | +| Вартість запитів | $500 на місяць | $120 per month | +| Час технічного обслуговування | $800 на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | +| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | ~3,000,000 | +| Вартість одного запиту | $0 | $0.00004 | +| Infrastructure | Централізована | Децентралізована | +| Інженерно-технічні витрати | $200 на годину | Включено | +| Географічне резервування | $1,200 загальних витрат на кожну додаткову ноду | Включено | +| Час безвідмовної роботи | Варіюється | 99.9%+ | +| Загальна сума щомісячних витрат | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | -| :-: | :-: | :-: | -| Щомісячна плата за сервер\* | $1100 на місяць, за одну ноду | $0 | -| Вартість запитів | $4000 | $1,200 per month | -| Кількість необхідних нод | 10 | Не стосується | -| Час технічного обслуговування | $6,000 і більше на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | -| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | ~30,000,000 | -| Вартість одного запиту | $0 | $0.00004 | -| Інфраструктура | Централізована | Децентралізована | -| Географічне резервування | $1,200 загальних витрат на кожну додаткову ноду | Включено | -| Час безвідмовної роботи | Варіюється | 99.9%+ | -| Загальна сума щомісячних витрат | $11,000+ | $1,200 | +| Порівняння вартості послуг | Самостійний хостинг | Graph мережа | +| :---------------------------------------: | :---------------------------------------------: | :---------------------------------------------------------------: | +| Щомісячна плата за сервер\* | $1100 на місяць, за одну ноду | $0 | +| Вартість запитів | $4000 | $1,200 per month | +| Кількість необхідних нод | 10 | Не стосується | +| Час технічного обслуговування | $6,000 і більше на місяць | Немає, вбудовані в мережу з глобально розподіленими індексаторами | +| Кількість запитів за місяць | Обмежується інфраструктурними можливостями | ~30,000,000 | +| Вартість одного запиту | $0 | $0.00004 | +| Infrastructure | Централізована | Децентралізована | +| Географічне резервування | $1,200 загальних витрат на кожну додаткову ноду | Включено | +| Час безвідмовної роботи | Варіюється | 99.9%+ | +| Загальна сума щомісячних витрат | $11,000+ | $1,200 | \*включаючи витрати на резервне копіювання: $50-$100 на місяць @@ -79,11 +79,11 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe Кураторство сигналу на підграфі є опціональною одноразовою послугою з нульовою вартістю (наприклад, сигнал на суму 1 тис. доларів можна розмістити на підграфі, а потім вивести — з можливістю отримання прибутку в цьому процесі). -## Відсутність витрат на налаштування & Більша ефективність роботи +## No Setup Costs & Greater Operational Efficiency Нульова комісія за налаштування. Почніть роботу негайно без витрат на налаштування та інших додаткових витрат. Ніяких жорстких вимог до апаратного обладнання. Відсутність перебоїв через централізовану інфраструктуру та більше часу для концентрації на основному продукті. 
Нема потреби в резервних серверах, усуненні несправностей або дорогих інженерно-технічних послугах. -## Надійність & Стійкість +## Reliability & Resiliency The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. From 1d24bbb93f6dc52e2a77a57e897bad032207a614 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:44 -0500 Subject: [PATCH 0430/1534] New translations benefits.mdx (Chinese Simplified) --- website/src/pages/zh/resources/benefits.mdx | 82 ++++++++++----------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/website/src/pages/zh/resources/benefits.mdx b/website/src/pages/zh/resources/benefits.mdx index 484b2c2c410a..b24acf39974e 100644 --- a/website/src/pages/zh/resources/benefits.mdx +++ b/website/src/pages/zh/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: Graph 网络与自托管 +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- Graph的去中心化网络经过精心设计和完善,创造了强大的索引和查询体验,由于世界各地成千上万的贡献者,它每天都在变得更好。 -这种去中心化协议的好处无法通过在本地运行`graph-node`来复制。Graph网络更可靠、更高效、更便宜。 +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. 以下是分析: @@ -19,7 +19,7 @@ Graph的去中心化网络经过精心设计和完善,创造了强大的索引 ## 好处解释 -### 更低& 更灵活的成本结构 +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| 成本比较 | 自托管 | Graph网络 | -| :------------------: | :-------------------------------------: | :----------------------------------------: | -| 每月服务器费用 \* | 每月350美元 | 0美元 | -| 查询成本 | $0+ | $0 per month | -| 工程时间 | 400美元每月 | 没有,内置在具有全球去中心化索引者的网络中 | -| 每月查询 | 受限于基础设施能力 | 100,000 (Free Plan) | -| 每个查询的成本 | 0美元 | $0 | -| 基础设施 | 中心化 | 去中心化 | -| 异地备援 | 每个额外节点 $750 + | 包括在内 | -| 正常工作时间 | 变量 | 99.9%+ | -| 每月总成本 | $750+ | 0美元 | +| 成本比较 | 自托管 | Graph网络 | +| :--------------: | :-------------------------------------: | :-------------------: | +| 每月服务器费用 \* | 每月350美元 | 0美元 | +| 查询成本 | $0+ | $0 per month | +| 工程时间 | 400美元每月 | 没有,内置在具有全球去中心化索引者的网络中 | +| 每月查询 | 受限于基础设施能力 | 100,000 (Free Plan) | +| 每个查询的成本 | 0美元 | $0 | +| Infrastructure | 中心化 | 去中心化 | +| 异地备援 | 每个额外节点 $750 + | 包括在内 | +| 正常工作时间 | 变量 | 99.9%+ | +| 每月总成本 | $750+ | 0美元 | ## Medium Volume User (~3M queries per month) -| 成本比较 | 自托管 | Graph网络 | -| :------------------: | :---------------------------------------------: | :----------------------------------------: | -| 每月服务器费用 \* | 每月350美元 | 0美元 | -| 查询成本 | 每月500美元 | $120 per month | -| 工程时间 | 每月800美元 | 没有,内置在具有全球去中心化索引者的网络中 | -| 每月查询 | 受限于基础设施能力 | ~3,000,000 | -| 每个查询的成本 | 0美元 | $0.00004 | -| 基础设施 | 中心化 | 去中心化 | -| 工程费用 | 每小时200美元 | 包括在内 | -| 异地备援 | 每个额外节点的总成本为1200美元 | 包括在内 | -| 正常工作时间 | 变量 | 99.9%+ | -| 每月总成本 | 1650美元以上 | $120 | +| 成本比较 | 自托管 | Graph网络 | +| :--------------: | :----------------------------------------: | :-------------------: | +| 每月服务器费用 \* | 每月350美元 | 0美元 | +| 查询成本 | 每月500美元 | $120 per month | +| 工程时间 | 每月800美元 | 没有,内置在具有全球去中心化索引者的网络中 | +| 每月查询 | 受限于基础设施能力 | ~3,000,000 | +| 每个查询的成本 | 0美元 
| $0.00004 | +| Infrastructure | 中心化 | 去中心化 | +| 工程费用 | 每小时200美元 | 包括在内 | +| 异地备援 | 每个额外节点的总成本为1200美元 | 包括在内 | +| 正常工作时间 | 变量 | 99.9%+ | +| 每月总成本 | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| 成本比较 | 自托管 | Graph网络 | -| :------------------: | :-------------------------------------------: | :----------------------------------------: | -| 每月服务器费用 \* | 1100美元每月每节点 | 0美元 | -| 查询成本 | 4000美元 | $1,200 per month | -| 需要的节点数量 | 10 | 不适用 | -| 工程时间 | 每月6000美元或以上 | 没有,内置在具有全球去中心化索引人的网络中 | -| 每月查询 | 受限于基础设施能力 | ~30,000,000 | -| 每个查询的成本 | 0美元 | $0.00004 | -| 基础设施 | 中心化 | 去中心化 | -| 异地备援 | 每个额外节点的总成本为1200美元 | 包括在内 | -| 正常工作时间 | 变量 | 99.9%+ | -| 每月总成本 | 11000+美元 | $1,200 | +| 成本比较 | 自托管 | Graph网络 | +| :--------------: | :-----------------------------------------: | :-------------------: | +| 每月服务器费用 \* | 1100美元每月每节点 | 0美元 | +| 查询成本 | 4000美元 | $1,200 per month | +| 需要的节点数量 | 10 | 不适用 | +| 工程时间 | 每月6000美元或以上 | 没有,内置在具有全球去中心化索引者的网络中 | +| 每月查询 | 受限于基础设施能力 | ~30,000,000 | +| 每个查询的成本 | 0美元 | $0.00004 | +| Infrastructure | 中心化 | 去中心化 | +| 异地备援 | 每个额外节点的总成本为1200美元 | 包括在内 | +| 正常工作时间 | 变量 | 99.9%+ | +| 每月总成本 | $11,000+ | $1,200 | - 包括后备费用: 每月$50-$100美元 @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe 在一个子图上策划信号是一个可选一次性净零成本(例如,1千美元的信号可以在一个子图上管理,然后撤回ーー在这个过程中有可能获得回报)。 -## 无安装成本和更高的运行效率 +## No Setup Costs & Greater Operational Efficiency 零安装费。立即开始,没有设置或间接费用。没有硬件要求。没有由于集中式基础设施而导致的中断,并且有更多的时间专注于您的核心产品。不需要备份服务器、故障排除或昂贵的工程资源。 -## 可靠性和弹性 +## Reliability & Resiliency The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -一句话: 与在本地运行一个`graph-node`相比,Graph网络成本更低,更容易使用,并且产生更好的结果。 +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From ed76ce1a65f2c5e1a2ffaca2f1f1d13444bbd867 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:45 -0500 Subject: [PATCH 0431/1534] New translations benefits.mdx (Urdu (Pakistan)) --- website/src/pages/ur/resources/benefits.mdx | 82 ++++++++++----------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/website/src/pages/ur/resources/benefits.mdx b/website/src/pages/ur/resources/benefits.mdx index eb5291212ad0..c2ee622f7872 100644 --- a/website/src/pages/ur/resources/benefits.mdx +++ b/website/src/pages/ur/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: گراف نیٹ ورکنگ بمقابلہ سیلف ہوسٹنگ +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- گراف کے ڈیسینٹرالائزڈ نیٹ ورک کو ایک مضبوط انڈیکسنگ اور کیوری کا تجربہ بنانے کے لیے انجنیئر اور بہتر بنایا گیا ہے — اور یہ دنیا بھر میں ہزاروں تعاون کرنے والوں کی بدولت ہر روز بہتر ہوتا جا رہا ہے. -اس ڈیسینٹرالائزڈ پروٹوکول کے فوائد کو مقامی طور پر `graph-node` چلا کر نقل نہیں کیا جا سکتا۔ گراف نیٹ ورک زیادہ قابل اعتماد، زیادہ موثر اور کم مہنگا ہے. +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. 
یہاں ایک تجزیہ ہے: @@ -19,7 +19,7 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg ## فوائد کی وضاحت کی -### زیریں اور زیادہ لچکدار لاگت کا ڈھانچہ +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| اخراجات کا موازنہ | خود میزبان | The Graph Network | -| :-: | :-: | :-: | -| ماہانہ سرور کی قیمت/\* | $350 فی مہینہ | $0 | -| استفسار کے اخراجات | $0+ | $0 per month | -| انجینئرنگ کا وقت | $400 فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | -| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | 100,000 (Free Plan) | -| قیمت فی سوال | $0 | $0 | -| بنیادی ڈھانچہ | سینٹرلائزڈ | ڈیسینٹرلائزڈ | -| جغرافیائی فالتو پن | $750+ فی اضافی نوڈ | شامل | -| اپ ٹائم | اتار چڑھاو | 99.9%+ | -| کل ماہانہ اخراجات | $750+ | $0 | +| اخراجات کا موازنہ | خود میزبان | The Graph Network | +| :--------------------------: | :-------------------------------------: | :-----------------------------------------------------------------------------: | +| ماہانہ سرور کی قیمت/\* | $350 فی مہینہ | $0 | +| استفسار کے اخراجات | $0+ | $0 per month | +| انجینئرنگ کا وقت | $400 فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | +| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | 100,000 (Free Plan) | +| قیمت فی سوال | $0 | $0 | +| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | +| جغرافیائی فالتو پن | $750+ فی اضافی نوڈ | شامل | +| اپ ٹائم | اتار چڑھاو | 99.9%+ | +| کل ماہانہ اخراجات | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| اخراجات کا موازنہ | خود میزبان | The Graph Network | -| :-: | :-: | :-: | -| ماہانہ سرور کی قیمت/\* | $350 فی مہینہ | $0 | -| استفسار کے اخراجات | $500 فی مہینہ | $120 per month | -| انجینئرنگ کا وقت | $800 فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | -| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | ~3,000,000 | -| قیمت فی سوال | $0 | $0.00004 | -| بنیادی ڈھانچہ | سینٹرلائزڈ | ڈیسینٹرلائزڈ | -| انجینئرنگ کے اخراجات | $200 فی گھنٹہ | شامل | -| جغرافیائی فالتو پن | فی اضافی نوڈ کل اخراجات میں $1,200 | شامل | -| اپ ٹائم | اتار چڑھاو | 99.9%+ | -| کل ماہانہ اخراجات | $1,650+ | $120 | +| اخراجات کا موازنہ | خود میزبان | The Graph Network | +| :--------------------------: | :----------------------------------------: | :-----------------------------------------------------------------------------: | +| ماہانہ سرور کی قیمت/\* | $350 فی مہینہ | $0 | +| استفسار کے اخراجات | $500 فی مہینہ | $120 per month | +| انجینئرنگ کا وقت | $800 فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | +| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | ~3,000,000 | +| قیمت فی سوال | $0 | $0.00004 | +| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | +| انجینئرنگ کے اخراجات | $200 فی گھنٹہ | شامل | +| جغرافیائی فالتو پن | فی اضافی نوڈ کل اخراجات میں $1,200 | شامل | +| اپ ٹائم | اتار چڑھاو | 99.9%+ | +| کل ماہانہ اخراجات | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| اخراجات کا موازنہ | سیلف ہوسٹڈ | The Graph Network | -| :-: | :-: | :-: | -| ماہانہ سرور کی قیمت/\* | $1100 فی مہینہ، فی نوڈ | $0 | -| استفسار کے اخراجات | $4000 | $1,200 per month | -| نوڈس کی تعداد درکار ہے | 10 | قابل 
اطلاق نہیں | -| انجینئرنگ کا وقت | $6,000 یا اس سے زیادہ فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | -| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | ~30,000,000 | -| قیمت فی سوال | $0 | $0.00004 | -| بنیادی ڈھانچہ | سینٹرلائزڈ | ڈیسینٹرلائزڈ | -| جغرافیائی فالتو پن | فی اضافی نوڈ کل اخراجات میں $1,200 | شامل | -| اپ ٹائم | اتار چڑھاو | 99.9%+ | -| کل ماہانہ اخراجات | $11,000+ | $1,200 | +| اخراجات کا موازنہ | خود میزبان | The Graph Network | +| :--------------------------: | :-----------------------------------------: | :-----------------------------------------------------------------------------: | +| ماہانہ سرور کی قیمت/\* | $1100 فی مہینہ، فی نوڈ | $0 | +| استفسار کے اخراجات | $4000 | $1,200 per month | +| نوڈس کی تعداد درکار ہے | 10 | قابل اطلاق نہیں | +| انجینئرنگ کا وقت | $6,000 یا اس سے زیادہ فی مہینہ | کوئی بھی نہیں، عالمی سطح پر تقسیم شدہ انڈیکسرز کے ساتھ نیٹ ورک میں بنایا گیا ہے | +| فی مہینہ سوالات | بنیادی صلاحیتوں تک محدود | ~30,000,000 | +| قیمت فی سوال | $0 | $0.00004 | +| Infrastructure | سینٹرلائزڈ | ڈیسینٹرلائزڈ | +| جغرافیائی فالتو پن | فی اضافی نوڈ کل اخراجات میں $1,200 | شامل | +| اپ ٹائم | اتار چڑھاو | 99.9%+ | +| کل ماہانہ اخراجات | $11,000+ | $1,200 | /\*بیک اپ کے اخراجات سمیت: $50-$100 فی مہینہ @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe سب گراف پر کیوریٹنگ سگنل ایک اختیاری ایک بار، خالص صفر لاگت ہے (مثال کے طور پر، $1k سگنل کو سب گراف پر کیوریٹ کیا جا سکتا ہے، اور بعد میں واپس لیا جا سکتا ہے—اس عمل میں منافع کمانے کی صلاحیت کے ساتھ). -## کوئی سیٹ اپ لاگت نہیں ہے اور زیادہ سے زیادہ آپریشنل کارکردگی +## No Setup Costs & Greater Operational Efficiency صفر سیٹ اپ فیس . بغیر کسی سیٹ اپ یا اوور ہیڈ اخراجات کے فوراً شروع کریں۔کوئی ہارڈ ویئر کی ضرورت نہیں ہے. مرکزی انفراسٹرکچر کی وجہ سے کوئی بندش نہیں،اور اپنی بنیادی مصنوعات پر توجہ دینے کے لیے زیادہ وقت ۔بیک اپ سرورز، ٹربل شوٹنگ، یا مہنگے انجینئرنگ وسائل کی ضرورت نہیں. -## اعتبار اور لچک +## Reliability & Resiliency The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -نیچے کی سطر: گراف نیٹ ورک کم مہنگا ہے، استعمال میں آسان ہے، اور مقامی طور پر `graph-node` کو چلانے کے مقابلے میں بہتر نتائج پیدا کرتا ہے. +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). From 4fe8a26d46a273683ae64afdad2be60a02031d2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:45 -0500 Subject: [PATCH 0432/1534] New translations benefits.mdx (Vietnamese) --- website/src/pages/vi/resources/benefits.mdx | 72 ++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/vi/resources/benefits.mdx b/website/src/pages/vi/resources/benefits.mdx index fc108e7f6a79..f2427dd27282 100644 --- a/website/src/pages/vi/resources/benefits.mdx +++ b/website/src/pages/vi/resources/benefits.mdx @@ -1,5 +1,5 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph vs. 
Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| Cost Comparison | Self Hosted | Mạng The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| Cơ sở hạ tầng | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | +| Cost Comparison | Self Hosted | Mạng The Graph | +| :--------------------------: | :-------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $0+ | $0 per month | +| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | +| Cost per query | $0 | $0 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $750+ per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| Cost Comparison | Self Hosted | Mạng The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| Cơ sở hạ tầng | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | +| Cost Comparison | Self Hosted | Mạng The Graph | +| :--------------------------: | :----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $350 per month | $0 | +| Query costs | $500 per month | $120 per month | +| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~3,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Engineering expense | $200 per hour | Included | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| Cost Comparison | Self Hosted | Mạng The Graph | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| Cơ sở hạ tầng | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies 
| 99.9%+ | -| Total Monthly Costs | $11,000+ | $1,200 | +| Cost Comparison | Self Hosted | Mạng The Graph | +| :--------------------------: | :-----------------------------------------: | :-------------------------------------------------------------: | +| Monthly server cost\* | $1100 per month, per node | $0 | +| Query costs | $4000 | $1,200 per month | +| Number of nodes needed | 10 | Not applicable | +| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | +| Queries per month | Limited to infra capabilities | ~30,000,000 | +| Cost per query | $0 | $0.00004 | +| Infrastructure | Centralized | Decentralized | +| Geographic redundancy | $1,200 in total costs per additional node | Included | +| Uptime | Varies | 99.9%+ | +| Total Monthly Costs | $11,000+ | $1,200 | \*including costs for backup: $50-$100 per month From 5b77e85fbec04bbd06d36cbf51a7d679f480b411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:46 -0500 Subject: [PATCH 0433/1534] New translations benefits.mdx (Marathi) --- website/src/pages/mr/resources/benefits.mdx | 82 ++++++++++----------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/website/src/pages/mr/resources/benefits.mdx b/website/src/pages/mr/resources/benefits.mdx index 33f0a7999983..d0d63b48418e 100644 --- a/website/src/pages/mr/resources/benefits.mdx +++ b/website/src/pages/mr/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: ग्राफ नेटवर्क वि. सेल्फ होस्टिंग +title: The Graph vs. Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- ग्राफचे विकेंद्रित नेटवर्क एक मजबूत अनुक्रमणिका आणि क्वेरी अनुभव तयार करण्यासाठी अभियंता आणि परिष्कृत केले गेले आहे — आणि जगभरातील हजारो योगदानकर्त्यांमुळे ते दररोज चांगले होत आहे. -या विकेंद्रित प्रोटोकॉलचे फायदे स्थानिक पातळीवर `ग्राफ-नोड` चालवून पुनरावृत्ती होऊ शकत नाहीत. ग्राफ नेटवर्क अधिक विश्वासार्ह, अधिक कार्यक्षम आणि कमी खर्चिक आहे. +The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. येथे एक विश्लेषण आहे: @@ -19,7 +19,7 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg ## फायदे स्पष्ट केले -### खालच्या & अधिक लवचिक खर्च संरचना +### Lower & more Flexible Cost Structure No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. 
@@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | -| :-: | :-: | :-: | -| मासिक सर्व्हर खर्च\* | दरमहा $350 | $0 | -| क्वेरी खर्च | $0+ | $0 per month | -| अभियांत्रिकी वेळ | दरमहा $400 | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | -| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | 100,000 (Free Plan) | -| प्रति क्वेरी खर्च | $0 | $0 | -| पायाभूत सुविधा | केंद्रीकृत | विकेंद्रित | -| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड $750+ | समाविष्ट | -| अपटाइम | बदलते | 99.9%+ | -| एकूण मासिक खर्च | $750+ | $0 | +| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | +| :--------------------------: | :-------------------------------------: | :----------------------------------------------------------------------: | +| मासिक सर्व्हर खर्च\* | दरमहा $350 | $0 | +| क्वेरी खर्च | $0+ | $0 per month | +| अभियांत्रिकी वेळ | दरमहा $400 | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | +| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | 100,000 (Free Plan) | +| प्रति क्वेरी खर्च | $0 | $0 | +| Infrastructure | केंद्रीकृत | विकेंद्रित | +| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड $750+ | समाविष्ट | +| अपटाइम | बदलते | 99.9%+ | +| एकूण मासिक खर्च | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | -| :-: | :-: | :-: | -| मासिक सर्व्हर खर्च\* | दरमहा $350 | $0 | -| क्वेरी खर्च | दरमहा $500 | $120 per month | -| अभियांत्रिकी वेळ | दरमहा $800 | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | -| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | ~3,000,000 | -| प्रति क्वेरी खर्च | $0 | $0.00004 | -| पायाभूत सुविधा | केंद्रीकृत | विकेंद्रित | -| अभियांत्रिकी खर्च | $200 प्रति तास | समाविष्ट | -| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड एकूण खर्चात $1,200 | समाविष्ट | -| अपटाइम | बदलते | 99.9%+ | -| एकूण मासिक खर्च | $1,650+ | $120 | +| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | +| :--------------------------: | :----------------------------------------: | :----------------------------------------------------------------------: | +| मासिक सर्व्हर खर्च\* | दरमहा $350 | $0 | +| क्वेरी खर्च | दरमहा $500 | $120 per month | +| अभियांत्रिकी वेळ | दरमहा $800 | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | +| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | ~3,000,000 | +| प्रति क्वेरी खर्च | $0 | $0.00004 | +| Infrastructure | केंद्रीकृत | विकेंद्रित | +| अभियांत्रिकी खर्च | $200 प्रति तास | समाविष्ट | +| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड एकूण खर्चात $1,200 | समाविष्ट | +| अपटाइम | बदलते | 99.9%+ | +| एकूण मासिक खर्च | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | -| :-: | :-: | :-: | -| मासिक सर्व्हर खर्च\* | प्रति नोड, प्रति महिना $1100 | $0 | -| क्वेरी खर्च | $4000 | $1,200 per month | -| आवश्यक नोड्सची संख्या | 10 | लागू नाही | -| अभियांत्रिकी वेळ | दरमहा $6,000 किंवा अधिक | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | -| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | ~30,000,000 | -| प्रति क्वेरी खर्च | $0 | $0.00004 | -| पायाभूत सुविधा | केंद्रीकृत | विकेंद्रित | -| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड एकूण खर्चात $1,200 | समाविष्ट | -| अपटाइम | बदलते | 99.9%+ | -| एकूण मासिक खर्च | $11,000+ | $1,200 | +| खर्चाची तुलना | स्वत: होस्ट केलेले | आलेख नेटवर्क | 
+| :--------------------------: | :-----------------------------------------: | :----------------------------------------------------------------------: | +| मासिक सर्व्हर खर्च\* | प्रति नोड, प्रति महिना $1100 | $0 | +| क्वेरी खर्च | $4000 | $1,200 per month | +| आवश्यक नोड्सची संख्या | 10 | लागू नाही | +| अभियांत्रिकी वेळ | दरमहा $6,000 किंवा अधिक | काहीही नाही, जागतिक स्तरावर वितरित इंडेक्सर्ससह नेटवर्कमध्ये तयार केलेले | +| प्रति महिना प्रश्न | इन्फ्रा क्षमतांपुरती मर्यादित | ~30,000,000 | +| प्रति क्वेरी खर्च | $0 | $0.00004 | +| Infrastructure | केंद्रीकृत | विकेंद्रित | +| भौगोलिक रिडंडंसी | प्रति अतिरिक्त नोड एकूण खर्चात $1,200 | समाविष्ट | +| अपटाइम | बदलते | 99.9%+ | +| एकूण मासिक खर्च | $11,000+ | $1,200 | \*बॅकअपच्या खर्चासह: $50-$100 प्रति महिना @@ -79,14 +79,14 @@ Estimated costs are only for Ethereum Mainnet subgraphs — costs are even highe सबग्राफवर क्युरेटिंग सिग्नल हा पर्यायी एक-वेळचा, निव्वळ-शून्य खर्च आहे (उदा., $1k सिग्नल सबग्राफवर क्युरेट केला जाऊ शकतो आणि नंतर मागे घेतला जाऊ शकतो—प्रक्रियेत परतावा मिळविण्याच्या संभाव्यतेसह). -## कोणतेही सेटअप खर्च नाही & ग्रेटर ऑपरेशनल कार्यक्षमता +## No Setup Costs & Greater Operational Efficiency शून्य सेटअप शुल्क. कोणत्याही सेटअप किंवा ओव्हरहेड खर्चाशिवाय त्वरित प्रारंभ करा. हार्डवेअर आवश्यकता नाही. केंद्रीकृत पायाभूत सुविधांमुळे कोणतेही आउटेज नाही आणि तुमच्या मुख्य उत्पादनावर लक्ष केंद्रित करण्यासाठी अधिक वेळ. बॅकअप सर्व्हर, समस्यानिवारण किंवा महागड्या अभियांत्रिकी संसाधनांची आवश्यकता नाही. -## विश्वसनीयता & लवचिकता +## Reliability & Resiliency The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -तळ ओळ: ग्राफ नेटवर्क कमी खर्चिक आहे, वापरण्यास सोपे आहे आणि स्थानिक पातळीवर `ग्राफ-नोड` चालवण्याच्या तुलनेत उत्कृष्ट परिणाम देते. +Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). 
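For scale, the 99.9%+ uptime figure that recurs in these tables caps downtime at roughly 43 minutes per month. A quick conversion, assuming a 30-day month:

```python
# Convert a 99.9% uptime floor into a monthly downtime ceiling.
minutes_per_month = 30 * 24 * 60                 # 43,200 minutes in a 30-day month
max_downtime = minutes_per_month * (1 - 0.999)
print(f"at most {max_downtime:.0f} minutes of downtime per month")  # ~43
```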
From 589b1848e385b723106d556dea3bf26f439132af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:47 -0500 Subject: [PATCH 0434/1534] New translations benefits.mdx (Hindi) --- website/src/pages/hi/resources/benefits.mdx | 88 ++++++++++----------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/website/src/pages/hi/resources/benefits.mdx b/website/src/pages/hi/resources/benefits.mdx index cafb61355a28..7abab6970130 100644 --- a/website/src/pages/hi/resources/benefits.mdx +++ b/website/src/pages/hi/resources/benefits.mdx @@ -1,11 +1,11 @@ --- -title: ग्राफ नेटवर्क बनाम सेल्फ होस्टिंग +title: The Graph बनाम स्वयं होस्टिंग socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- ग्राफ़ के विकेन्द्रीकृत नेटवर्क को एक मजबूत अनुक्रमण और क्वेरी अनुभव बनाने के लिए इंजीनियर और परिष्कृत किया गया है - और यह दुनिया भर के हजारों योगदानकर्ताओं के लिए हर दिन बेहतर हो रहा है। -इस विकेंद्रीकृत प्रोटोकॉल के लाभों को `ग्राफ-नोड` स्थानीय स्तर पर चलाकर दोहराया नहीं जा सकता है। ग्राफ़ नेटवर्क अधिक विश्वसनीय, अधिक कुशल और कम खर्चीला है। +इस विकेंद्रीकृत प्रोटोकॉल के लाभों की नकल `ग्राफ-नोड`को लोकल रूप से चलाकर नहीं की जा सकती। The Graph Network अधिक विश्वसनीय, अधिक प्रभावी, और कम महंगा है। यहाँ एक विश्लेषण है: @@ -19,7 +19,7 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg ## लाभ समझाया -### निचला & अधिक लचीली लागत संरचना +### कम और अधिक लचीला लागत संरचना No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. @@ -27,47 +27,47 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar ## Low Volume User (less than 100,000 queries per month) -| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | -| :-: | :-: | :-: | -| मासिक सर्वर लागत\* | $350 प्रति माह | $0 | -| पूछताछ लागत | $0+ | $0 per month | -| इंजीनियरिंग का समय | $ 400 प्रति माह | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | -| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | 100,000 (Free Plan) | -| लागत प्रति क्वेरी | $0 | $0 | -| आधारभूत संरचना | केंद्रीकृत | विकेन्द्रीकृत | -| भौगोलिक अतिरेक | $750+ प्रति अतिरिक्त नोड | शामिल | -| अपटाइम | भिन्न | 99.9%+ | -| कुल मासिक लागत | $750+ | $0 | +| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | +| :----------------------------: | :-------------------------------------: | :--------------------------------------------------------------------: | +| मासिक सर्वर लागत\* | $350 प्रति माह | $0 | +| पूछताछ लागत | $0+ | $0 per month | +| इंजीनियरिंग का समय | $ 400 प्रति माह | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | +| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | 100,000 (Free Plan) | +| लागत प्रति क्वेरी | $0 | $0 | +| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | +| भौगोलिक अतिरेक | $750+ प्रति अतिरिक्त नोड | शामिल | +| अपटाइम | भिन्न | 99.9%+ | +| कुल मासिक लागत | $750+ | $0 | ## Medium Volume User (~3M queries per month) -| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | -| :-: | :-: | :-: | -| मासिक सर्वर लागत\* | $350 प्रति माह | $0 | -| पूछताछ लागत | $ 500 प्रति माह | $120 per month | -| इंजीनियरिंग का समय | $800 प्रति माह | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | -| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | ~3,000,000 | -| लागत प्रति क्वेरी | $0 | $0.00004 | -| आधारभूत संरचना | केंद्रीकृत | विकेन्द्रीकृत | -| इंजीनियरिंग खर्च | $ 200 प्रति घंटा | 
शामिल | -| भौगोलिक अतिरेक | प्रति अतिरिक्त नोड कुल लागत में $1,200 | शामिल | -| अपटाइम | भिन्न | 99.9%+ | -| कुल मासिक लागत | $1,650+ | $120 | +| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | +| :----------------------------: | :----------------------------------------: | :--------------------------------------------------------------------: | +| मासिक सर्वर लागत\* | $350 प्रति माह | $0 | +| पूछताछ लागत | $ 500 प्रति माह | $120 per month | +| इंजीनियरिंग का समय | $800 प्रति माह | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | +| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | ~3,000,000 | +| लागत प्रति क्वेरी | $0 | $0.00004 | +| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | +| इंजीनियरिंग खर्च | $ 200 प्रति घंटा | शामिल | +| भौगोलिक अतिरेक | प्रति अतिरिक्त नोड कुल लागत में $1,200 | शामिल | +| अपटाइम | भिन्न | 99.9%+ | +| कुल मासिक लागत | $1,650+ | $120 | ## High Volume User (~30M queries per month) -| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | -| :-: | :-: | :-: | -| मासिक सर्वर लागत\* | $1100 प्रति माह, प्रति नोड | $0 | -| पूछताछ लागत | $4000 | $1,200 per month | -| आवश्यक नोड्स की संख्या | 10 | Not applicable | -| इंजीनियरिंग का समय | $6,000 or more per month | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | -| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | ~30,000,000 | -| लागत प्रति क्वेरी | $0 | $0.00004 | -| आधारभूत संरचना | केंद्रीकृत | विकेन्द्रीकृत | -| भौगोलिक अतिरेक | प्रति अतिरिक्त नोड कुल लागत में $1,200 | शामिल | -| अपटाइम | भिन्न | 99.9%+ | -| कुल मासिक लागत | $11,000+ | $1,200 | +| लागत तुलना | स्वयं होस्ट किया गया | The Graph Network | +| :----------------------------: | :-----------------------------------------: | :--------------------------------------------------------------------: | +| मासिक सर्वर लागत\* | $1100 प्रति माह, प्रति नोड | $0 | +| पूछताछ लागत | $4000 | $1,200 per month | +| आवश्यक नोड्स की संख्या | 10 | Not applicable | +| इंजीनियरिंग का समय | $6,000 or more per month | कोई नहीं, विश्व स्तर पर वितरित इंडेक्सर्स के साथ नेटवर्क में बनाया गया | +| प्रति माह प्रश्न | इन्फ्रा क्षमताओं तक सीमित | ~30,000,000 | +| लागत प्रति क्वेरी | $0 | $0.00004 | +| Infrastructure | केंद्रीकृत | विकेन्द्रीकृत | +| भौगोलिक अतिरेक | प्रति अतिरिक्त नोड कुल लागत में $1,200 | शामिल | +| अपटाइम | भिन्न | 99.9%+ | +| कुल मासिक लागत | $11,000+ | $1,200 | \*बैकअप की लागत सहित: $50-$100 प्रति माह @@ -75,18 +75,18 @@ Query costs may vary; the quoted cost is the average at time of publication (Mar Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/archived/arbitrum/arbitrum-faq/) are substantially lower than Ethereum mainnet. 
+एस्टिमेटेड लागत केवल Ethereum Mainnet सबग्राफ़ के लिए है — अन्य नेटवर्कों पर `ग्राफ-नोड` को स्वयं होस्ट करने पर लागत और भी अधिक होती है। कुछ उपयोगकर्ताओं को अपने Subgraph को एक नई संस्करण में अपडेट करने की आवश्यकता हो सकती है। Ethereum गैस शुल्क के कारण, एक अपडेट की लागत लगभग ~$50 है जब लेख लिखा गया था। ध्यान दें कि [Arbitrum](/archived/arbitrum/arbitrum-faq/) पर गैस शुल्क Ethereum mainnet से काफी कम हैं। एक सबग्राफ पर क्यूरेटिंग सिग्नल एक वैकल्पिक वन-टाइम, नेट-जीरो कॉस्ट है (उदाहरण के लिए, सिग्नल में $1k को सबग्राफ पर क्यूरेट किया जा सकता है, और बाद में वापस ले लिया जाता है - प्रक्रिया में रिटर्न अर्जित करने की क्षमता के साथ)। -## कोई सेटअप लागत नहीं & ग्रेटर ऑपरेशनल एफिशिएंसी +## कोई सेटअप लागत नहीं और अधिक परिचालन दक्षता शून्य सेटअप शुल्क। बिना किसी सेटअप या ओवरहेड लागत के तुरंत आरंभ करें। कोई हार्डवेयर आवश्यकताएँ नहीं। केंद्रीकृत बुनियादी ढांचे के कारण कोई आउटेज नहीं, और अपने मूल उत्पाद पर ध्यान केंद्रित करने के लिए अधिक समय। बैकअप सर्वर, समस्या निवारण या महंगे इंजीनियरिंग संसाधनों की कोई आवश्यकता नहीं है। -## Reliability & Resiliency +## विश्वसनीयता और लचीलापन -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. +The Graph का विकेन्द्रीकृत नेटवर्क उपयोगकर्ताओं को भौगोलिक रेडंडेंसी तक पहुंच प्रदान करता है, जो कि जब आप `ग्राफ-नोड` को स्वयं होस्ट करते हैं तो मौजूद नहीं होता। 99.9%+ अपटाइम के साथ queries को विश्वसनीय रूप से सेवा प्रदान की जाती है, जो कि सैकड़ों स्वतंत्र Indexers द्वारा नेटवर्क को वैश्विक स्तर पर सुरक्षित किए जाने से प्राप्त होती है। -निचला रेखा: ग्राफ़ नेटवर्क कम खर्चीला है, उपयोग में आसान है, और `ग्राफ़-नोड` को स्थानीय रूप से चलाने की तुलना में बेहतर परिणाम देता है। +The Graph Network कम खर्चीला, उपयोग में आसान और बेहतर परिणाम प्रदान करता है, जब की graph-node को लोकल पर चलाने के मुकाबले। -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/subgraphs/quick-start/). +आज ही The Graph Network का उपयोग शुरू करें, और सीखें कि कैसे [अपने subgraph को The Graph के विकेंद्रीकृत नेटवर्क पर प्रकाशित](/subgraphs/quick-start/) करें। From 921b8c200174424e006cb78d4279b06050d88edc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:48 -0500 Subject: [PATCH 0435/1534] New translations glossary.mdx (Romanian) --- website/src/pages/ro/resources/glossary.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ro/resources/glossary.mdx b/website/src/pages/ro/resources/glossary.mdx index bdeff7281023..ffcd4bca2eed 100644 --- a/website/src/pages/ro/resources/glossary.mdx +++ b/website/src/pages/ro/resources/glossary.mdx @@ -22,6 +22,8 @@ title: Glossary - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. 
The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -42,7 +44,7 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: Glossary - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. From 2866089bd108369ea895fc9895ebd7833dc75c0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:49 -0500 Subject: [PATCH 0436/1534] New translations glossary.mdx (French) --- website/src/pages/fr/resources/glossary.mdx | 68 +++++++++++---------- 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/website/src/pages/fr/resources/glossary.mdx b/website/src/pages/fr/resources/glossary.mdx index ebe5e6e88668..cfaa0beb4c78 100644 --- a/website/src/pages/fr/resources/glossary.mdx +++ b/website/src/pages/fr/resources/glossary.mdx @@ -2,77 +2,79 @@ title: Glossaire --- -- **The Graph**: un protocole décentralisé pour l'indexation et l'interrogation des données. +- **The Graph** : Un protocole décentralisé pour l'indexation et l'interrogation des données. -- **Query**: une requête de données. 
Dans le cas de The Graph, une requête est une demande de données provenant d'un subgraph qui sera traitée par un indexeur. +- **Query** : Une requête de données. Dans le cas de The Graph, une requête est une demande de données provenant d'un subgraph à laquelle répondra un Indexeur. -- **GraphQL** : Un langage de requête pour les API et un moteur d'exécution pour répondre à ces requêtes avec vos données existantes. Le graph utilise GraphQL pour interroger les subgraphs. +- **GraphQL** : Un langage de requête pour les API et un moteur d'exécution pour répondre à ces requêtes avec vos données existantes. The Graph utilise GraphQL pour interroger les subgraphs. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Endpoint** : Une URL qui peut être utilisée pour interroger un subgraph. L'endpoint de test pour Subgraph Studio est `https://api.studio.thegraph.com/query///` et l'endpoint pour Graph Explorer est `https://gateway.thegraph.com/api//subgraphs/id/`. L'endpoint Graph Explorer est utilisé pour interroger les subgraphs sur le réseau décentralisé de The Graph. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph** : Une API ouverte qui extrait des données d'une blockchain, les traite et les stocke de manière à ce qu'elles puissent être facilement interrogées via GraphQL. Les développeurs peuvent créer, déployer et publier des subgraphs sur The Graph Network. Une fois indexé, le subgraph peut être interrogé par n'importe qui. -- **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. +- **Indexeur** : Participants au réseau qui gèrent des nœuds d'indexation pour indexer les données des blockchains et répondre aux requêtes GraphQL. -- **Flux de revenus des indexeurs** : Les indexeurs sont récompensés en GRT avec deux composantes : les remises sur les frais de requête et les récompenses d'indexation. +- **Flux de revenus pour les Indexeurs** : Les Indexeurs sont récompensés en GRT par deux éléments : les remises sur les frais de requête et les récompenses pour l'indexation. - 1. **Remises sur les frais de requête** : paiements des consommateurs de subgraphs pour la réponse aux requêtes sur le réseau. + 1. **Remboursements de frais de requête** : Paiements effectués par les consommateurs de subgraphs pour avoir servi des requêtes sur le réseau. - 2. **Récompenses d'indexation** : les récompenses que les indexeurs reçoivent pour l'indexation des subgraphs. Les récompenses d'indexation sont générées par l'émission annuelle de 3 % de GRT. + 2. **Récompenses d'indexation** : Les récompenses que les Indexeurs reçoivent pour l'indexation des subgraphs. Les récompenses d'indexation sont générées par une nouvelle émission de 3 % de GRT par an. -- **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Indexer's Self-Stake** : Le montant de GRT que les Indexeurs stakent pour participer au réseau décentralisé. 
Le minimum est de 100 000 GRT, et il n'y a pas de limite supérieure. -- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. +- **Delegation Capacity** : C'est le montant maximum de GRT qu'un Indexeur peut accepter de la part des Déléguateurs. Les Indexeurs ne peuvent accepter que jusqu'à 16 fois leur propre Indexer Self-Stake, et toute délégation supplémentaire entraîne une dilution des récompenses. Par exemple, si un Indexeur a une Indexer Self-Stake de 1M GRT, sa capacité de délégation est de 16M. Cependant, les indexeurs peuvent augmenter leur capacité de délégation en augmentant leur Indexer Self-Stake. -- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Upgrade Indexer** : Un Indexeur conçu pour servir de solution de repli pour les requêtes de subgraphs qui ne sont pas traitées par d'autres Indexeurs sur le réseau. L'upgrade Indexer n'est pas compétitif par rapport aux autres Indexeurs. -- **Taxe de délégation** : Une taxe de 0,5 % payée par les délégués lorsqu'ils délèguent des GRT aux indexeurs. Les GRT utilisés pour payer la taxe sont brûlés. +- **Delegator**(Déléguateurs) : Participants au réseau qui possèdent des GRT et les délèguent à des Indexeurs. Cela permet aux Indexeurs d'augmenter leur participation dans les subgraphs du réseau. En retour, les Déléguateurs reçoivent une partie des récompenses d'indexation que les Indexeurs reçoivent pour le traitement des subgraphs. -- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. +- **Taxe de délégation** : Une taxe de 0,5 % payée par les Déléguateurs lorsqu'ils délèguent des GRT aux Indexeurs. Les GRT utilisés pour payer la taxe sont brûlés. -- **Taxe de curation** : Une taxe de 1% payée par les curateurs lorsqu'ils signalent des GRT sur des subgraphs. Le GRT utilisé pour payer la taxe est brûlé. +- **Curator**(Curateur) : Participants au réseau qui identifient les subgraphs de haute qualité et signalent les GRT sur ces derniers en échange de parts de curation. Lorsque les Indexeurs réclament des frais de requête pour un subgraph, 10 % sont distribués aux Curateurs de ce subgraph. Il existe une corrélation positive entre la quantité de GRT signalée et le nombre d'Indexeurs indexant un subgraph. -- **Data Consumer**: Any application or user that queries a subgraph. +- **Taxe de curation** : Une taxe de 1% payée par les Curateurs lorsqu'ils signalent des GRT sur des subgraphs. Les GRT utiliséa pour payer la taxe sont brûlés. -- **Développeur de subgraphs** : un développeur qui crée et déploie un subgraph sur le réseau décentralisé de The Graph. +- **Consommateur de données** : Toute application ou utilisateur qui interroge un subgraph. -- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. 
+- **Développeur de subgraphs** : Un développeur qui construit et déploie un subgraph sur le réseau décentralisé de The Graph. -- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. +- **Manifeste du subgraph** : Un fichier YAML qui décrit le schéma GraphQL du subgraph, les sources de données et d'autres métadonnées. Vous trouverez [Ici](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) un exemple. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: +- **Epoque** : Unité de temps au sein du réseau. Actuellement, une époque correspond à 6 646 blocs, soit environ 1 jour. - 1. **Actif** : Une allocation est considérée comme active lorsqu'elle est créée sur la chaîne. Cela s'appelle ouvrir une allocation, et indique au réseau que l'indexeur indexe et sert activement les requêtes pour un subgraph particulier. Les allocations actives accumulent des récompenses d'indexation proportionnelles au signal sur le subgraph et à la quantité de GRT allouée. +- **Allocation** : Un Indexeur peut allouer l'ensemble de son staking de GRT (y compris le staking des Déléguateurs) à des subgraphs qui ont été publiés sur le réseau décentralisé de The Graph. Les allocations peuvent avoir différents statuts : - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 1. **Actif** : Une allocation est considérée comme active lorsqu'elle est créée onchain. C'est ce qu'on appelle ouvrir une allocation, et cela indique au réseau que l'Indexeur est en train d'indexer et de servir des requêtes pour un subgraph particulier. Les allocations actives accumulent des récompenses d'indexation proportionnelles au signal sur le subgraph et à la quantité de GRT allouée. -- **Subgraph Studio** : une application puissante pour créer, déployer et publier des subgraphs. + 2. **Fermé** : Un Indexeur peut réclamer les récompenses d'indexation accumulées sur un subgraph donné en soumettant une preuve d'indexation (POI) récente et valide. C'est ce qu'on appelle la fermeture d'une allocation. Une allocation doit avoir été ouverte pendant au moins une époque avant de pouvoir être fermée. La période d'allocation maximale est de 28 époques. Si un Indexeur laisse une allocation ouverte au-delà de 28 époques, il s'agit d'une allocation périmée. Lorsqu'une allocation est dans l'état **fermé**, un Fisherman peut encore ouvrir un litige pour contester un Indexeur pour avoir servi de fausses données. + +- **Subgraph Studio** : Une dapp puissante pour construire, déployer et publier des subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. 
If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitres** : Les arbitres sont des participants au réseau nommés dans le cadre d'un processus de gouvernance. Le rôle de l'arbitre est de décider de l'issue des litiges relatifs à l'indexation et aux requêtes. Leur objectif est de maximiser l'utilité et la fiabilité de The Graph. -- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**(Taillade) : Les Indexeurs peuvent se voir retirer leur GRT pour avoir fourni un POI incorrect ou pour avoir diffusé des données inexactes. Le pourcentage de réduction est un paramètre protocolaire actuellement fixé à 2,5 % du staking personnel de l'Indexeur. 50 % des GRT réduit est versé au pêcheur qui a contesté les données inexactes ou le point d'intérêt incorrect. Les 50 % restants sont brûlés. -- **Récompenses d'indexation** : les récompenses que les indexeurs reçoivent pour l'indexation des subgraphs. Les récompenses d'indexation sont distribuées en GRT. +- **Récompenses d'indexation** : Les récompenses que les Indexeurs reçoivent pour l'indexation des subgraphs. Les récompenses d'indexation sont distribuées en GRT. -- **Récompenses de délégation** : Les récompenses que les délégués reçoivent pour avoir délégué des GRT aux indexeurs. Les récompenses de délégation sont distribuées dans les GRT. +- **Récompenses de délégation** : Les récompenses que les Déléguateurs reçoivent pour avoir délégué des GRT aux Indexeurs. Les récompenses de délégation sont distribuées en GRT. -- .**GRT** : le jeton d'utilité du travail de The Graph, le GRT offre des incitations économiques aux participants du réseau pour leur contribution au réseau. +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. 
+- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **The Graph Client** : une bibliothèque pour créer des dapps basées sur GraphQL de manière décentralisée. +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer** : Une application conçue pour que les participants au réseau puissent explorer les subgraphs et interagir avec le protocole. +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **Graph CLI** : Un outil d'interface de ligne de commande pour construire et déployer sur The Graph. +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **Période de récupération** : le temps restant jusqu'à ce qu'un indexeur qui a modifié ses paramètres de délégation puisse le faire à nouveau. +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. From a9c4d3371c97033a3494b634d8dccc9b6a0fdf5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:51 -0500 Subject: [PATCH 0437/1534] New translations glossary.mdx (Spanish) --- website/src/pages/es/resources/glossary.mdx | 42 +++++++++++---------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/website/src/pages/es/resources/glossary.mdx b/website/src/pages/es/resources/glossary.mdx index b2b41524b887..a3614062a63a 100644 --- a/website/src/pages/es/resources/glossary.mdx +++ b/website/src/pages/es/resources/glossary.mdx @@ -2,39 +2,41 @@ title: Glosario --- -- **The Graph**: Un protocolo descentralizado para indexar y consultar datos. +- **The Graph**: A decentralized protocol for indexing and querying data. -- **Consulta (Query)**: Consulta de datos. En el caso de The Graph, una consulta es una solicitud de datos de un subgrafo que será respondida por un Indexador. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**: Un lenguaje de consulta para APIs y un tiempo de ejecución para cumplir esas consultas con datos existentes. The Graph utiliza GraphQL para consultar subgrafos. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **Endpoint**: Una URL que se puede utilizar para consultar un subgrafo. El endpoint de prueba para Subgraph Studio es `https://api.studio.thegraph.com/query///` y el endpoint de Graph Explorer es `https://gateway.thegraph.com/api//subgraphs/id/`. El endpoint de Graph Explorer se utiliza para consultar subgrafos en la red descentralizada de The Graph. +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. 
- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. -- **Flujos de ingresos de los indexadores (Indexer Revenue Streams)**: Los Indexadores son recompensados en GRT con dos componentes: reembolsos de tarifas de consulta y recompensas de indexación. +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Reembolsos de Tarifas de Consultas (Query Fee Rebates)**: Pagos de los consumidores de subgrafos por servir consultas en la red. + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **Recompensas de Indexación (Indexing Rewards)**: Las recompensas que reciben los Indexadores por indexar subgrafos. Las recompensas de indexación se generan mediante una nueva emisión anual del 3% de GRT. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **Impuesto a la Delegación (Delegation Tax)**: Una tasa del 0,5% que pagan los Delegadores cuando delegan GRT en los Indexadores. El GRT utilizado para pagar la tasa se quema. +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. - **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **Impuesto a la Curación (Curation Tax)**: Una tasa del 1% pagada por los Curadores cuando señalan GRT en los subgrafos. El GRT utilizado para pagar la tasa se quema. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. - **Data Consumer**: Any application or user that queries a subgraph. -- **Developer de subgrafos (Subgraph developer)**: Developer que construye y realiza el deploy de un subgrafo en la red descentralizada de The Graph. 
+- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. @@ -42,11 +44,11 @@ title: Glosario - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Activa (Active)**: Una allocation se considera activa cuando se crea on-chain. Esto se llama abrir una allocation, e indica a la red que el Indexador está indexando activamente y sirviendo consultas para un subgrafo en particular. Las allocations activas acumulan recompensas de indexación proporcionales a la señal del subgrafo y a la cantidad de GRT asignada. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: Una potente aplicación para crear, deployar y publicar subgrafos. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -54,25 +56,25 @@ title: Glosario - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Recompensas de Indexación (Indexing Rewards)**: Las recompensas que reciben los Indexadores por indexar subgrafos. Las recompensas de indexación se distribuyen en GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **Recompensas de Delegación (Delegation Rewards)**: Las recompensas que reciben los Delegadores por delegar GRT en los Indexadores. 
Las recompensas por delegación se distribuyen en GRT. +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. -- **GRT**: El token de utilidad de trabajo de The Graph. GRT ofrece incentivos económicos a los participantes en la red por contribuir a ella. +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **Cliente The Graph (The Graph Client)**: Una biblioteca para construir dapps basadas en GraphQL de forma descentralizada. +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Explorador de Graph (Graph Explorer)**: Una aplicación diseñada para que los participantes en la red exploren subgrafos e interactúen con el protocolo. +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **Graph CLI**: Una herramienta de interfaz de línea de comandos para construir y realizar deploys en The Graph. +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **Período de enfriamiento (Cooldown Period)**: El tiempo restante hasta que un Indexador que cambió sus parámetros de delegación pueda volver a hacerlo. +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. From 15f7c4bc6b54a947e8ae4ad483fda7c92a96ad54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:51 -0500 Subject: [PATCH 0438/1534] New translations glossary.mdx (Arabic) --- website/src/pages/ar/resources/glossary.mdx | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ar/resources/glossary.mdx b/website/src/pages/ar/resources/glossary.mdx index 6e4fbeab2e85..f922950390a6 100644 --- a/website/src/pages/ar/resources/glossary.mdx +++ b/website/src/pages/ar/resources/glossary.mdx @@ -2,13 +2,13 @@ title: قائمة المصطلحات --- -- **الغراف**: بروتوكول لامركزي لفهرسة البيانات والاستعلام عنها. +- **The Graph**: A decentralized protocol for indexing and querying data. 
-- **الاستعلام**: طلب للحصول على البيانات. في حالة الغراف، الاستعلام هو طلب للحصول على البيانات من غراف فرعي ما وسيتم الرد عليه بواسطة مفهرس ما. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**: لغة استعلام لواجهات برمجة التطبيقات (APIs) يستخدم GraphQL للاستعلام عن السوبغرافات. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **نقطة النهاية (Endpoint)**: عنوان URL يمكن استخدامه للاستعلام عن سبغراف. نقطة الاختبار لـ سبغراف استوديو هي: `https://api.studio.thegraph.com/query///` ونقطة نهاية مستكشف الغراف هي: `https://gateway.thegraph.com/api//subgraphs/id/` تُستخدم نقطة نهاية مستكشف الغراف للاستعلام عن سبغرافات على شبكة الغراف اللامركزية. +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. @@ -16,12 +16,14 @@ title: قائمة المصطلحات - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **خصومات رسوم الاستعلام**: هي المدفوعات التي يتلقاها مستهلكو الغرافات الفرعية مقابل تقديم الاستعلامات على الشبكة. + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -34,15 +36,15 @@ title: قائمة المصطلحات - **Data Consumer**: Any application or user that queries a subgraph. -- **مطور السوبغراف**: هو المطور الذي يقوم ببناء ونشر السوبغراف على شبكة الغراف اللامركزية. +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. 
-- **الحقبة (Epoch)**: وحدة زمنية داخل الشبكة. حاليًا، تتألف الحقبة من 6,646 كتلة أو تقريبًا يوم واحد. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: قائمة المصطلحات - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. From 4b6e87f5375e99754518166a4aa9ab673bd0c5c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:53 -0500 Subject: [PATCH 0439/1534] New translations glossary.mdx (Czech) --- website/src/pages/cs/resources/glossary.mdx | 50 +++++++++++---------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/website/src/pages/cs/resources/glossary.mdx b/website/src/pages/cs/resources/glossary.mdx index 5e3859945744..70161f581585 100644 --- a/website/src/pages/cs/resources/glossary.mdx +++ b/website/src/pages/cs/resources/glossary.mdx @@ -2,80 +2,82 @@ title: Glosář --- -- **Graf**: Decentralizovaný protokol pro indexování a dotazování dat. +- **The Graph**: A decentralized protocol for indexing and querying data. -- **Dotaz**: Požadavek na data. V případě Grafu je dotaz požadavek na data z podgrafu, na který odpoví indexer. +- **Query**: A request for data. 
In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**: Je to dotazovací jazyk pro API a runtime pro plnění těchto dotazů s existujícími daty. Graf používá GraphQL k dotazování dílčích grafů. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **Koncový bod**: URL, které lze použít k dotazu na podgraf. Testovací koncový bod pro Podgraf Studio je `https://api.studio.thegraph.com/query///` a koncový bod Graf Exploreru je `https://gateway.thegraph.com/api//subgraphs/id/`. Koncový bod Graf Explorer se používá k dotazování podgrafů v decentralizované síti Graf. +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. -- **Příjmy indexátorů**: Indexátoři jsou v GRT odměňováni dvěma složkami: slevami z poplatků za dotazy a odměnami za indexování. +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Vrácení poplatků za dotaz**: Platby od konzumentů podgrafů za obsluhu dotazů v síti. + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **Odměny za indexování**: Odměny, které indexátory obdrží za indexování podgrafů. Odměny za indexování jsou generovány prostřednictvím nové emise 3% GRT ročně. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **Delegační daň**: 0.5% poplatek, který platí delegáti, když delegují GRT na indexátory. GRT použitý k úhradě poplatku se spálí. +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. 
- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **Curation Tax**: Kurátoři platí 1% poplatek, když signalizují GRT na podgraf GRT použitý k zaplacení poplatku se spálí. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. - **Data Consumer**: Any application or user that queries a subgraph. -- **Vývojář podgrafů**: Vývojář, který vytváří a nasazuje subgraf do decentralizované sítě Grafu. +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. -- **Epoch**: Časová jednotka v rámci sítě. V současné době je jedna epocha 6,646 bloků nebo přibližně 1 den. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Alokace**: Indexátor může přidělit svůj celkový podíl GRT (včetně podílu delegátů) na podgrafy, které byly zveřejněny v decentralizované síti Graf. Alokace mohou mít různé stavy: +- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Aktivní**: Alokace je považována za aktivní, když je vytvořena v řetězci. Tomu se říká otevření alokace a signalizuje síti, že indexátor aktivně indexuje a obsluhuje dotazy pro daný podgraf. Aktivní alokace získávají odměny za indexování úměrné signálu na podgrafu a množství alokovaného GRT. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Podgraf Studio**: Výkonná aplikace pro vytváření, nasazování a publikování podgrafů. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. 
Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Rozhodčí**: Rozhodci jsou účastníci sítě jmenovaní v rámci procesu řízení. Úkolem arbitra je rozhodovat o výsledku sporů týkajících se indexace a dotazů. Jejich cílem je maximalizovat užitečnost a spolehlivost sítě Graf. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Odměny za indexování**: Odměny, které indexátory obdrží za indexování podgrafů. Odměny za indexování se rozdělují v GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **Odměny za delegování**: Odměny, které delegáti obdrží za delegování GRT na indexátory. Odměny za delegování se rozdělují v GRT. +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. -- **GRT**: Token pracovního nástroje Grafu. GRT poskytuje účastníkům sítě ekonomické pobídky za přispívání do sítě. +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **Klient grafu**: Knihovna pro decentralizované vytváření dapps na bázi GraphQL. +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Průzkumník grafů**: dapp určená pro účastníky sítě k prozkoumávání podgrafů a interakci s protokolem. +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **Graf CLI**: Nástroj rozhraní příkazového řádku pro vytváření a nasazování do Grapfu. +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. 
-- **Období vychladnutí**: Doba, která zbývá do doby, než indexátor, který změnil své parametry delegování, může tuto změnu provést znovu. +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. - **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrace**: Proces sdílení kurátorů, při kterém se přechází ze staré verze podgrafu na novou verzi podgrafu (např. při aktualizaci verze v0.0.1 na verzi v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 3e781c89d7503830c1270eecb6f61d3fa53bf1f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:54 -0500 Subject: [PATCH 0440/1534] New translations glossary.mdx (German) --- website/src/pages/de/resources/glossary.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/resources/glossary.mdx b/website/src/pages/de/resources/glossary.mdx index bdeff7281023..ffcd4bca2eed 100644 --- a/website/src/pages/de/resources/glossary.mdx +++ b/website/src/pages/de/resources/glossary.mdx @@ -22,6 +22,8 @@ title: Glossary - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -42,7 +44,7 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 
2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: Glossary - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. From a99e7485fd95164accc04323016f0b2212577e6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:55 -0500 Subject: [PATCH 0441/1534] New translations glossary.mdx (Italian) --- website/src/pages/it/resources/glossary.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/src/pages/it/resources/glossary.mdx b/website/src/pages/it/resources/glossary.mdx index bdeff7281023..ffcd4bca2eed 100644 --- a/website/src/pages/it/resources/glossary.mdx +++ b/website/src/pages/it/resources/glossary.mdx @@ -22,6 +22,8 @@ title: Glossary - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -42,7 +44,7 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. 
Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: Glossary - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. From 8384807518da02abc8ef2adb771ad6cb029ece7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:56 -0500 Subject: [PATCH 0442/1534] New translations glossary.mdx (Japanese) --- website/src/pages/ja/resources/glossary.mdx | 48 +++++++++++---------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/website/src/pages/ja/resources/glossary.mdx b/website/src/pages/ja/resources/glossary.mdx index ced046d1391e..c71697a009cf 100644 --- a/website/src/pages/ja/resources/glossary.mdx +++ b/website/src/pages/ja/resources/glossary.mdx @@ -2,80 +2,82 @@ title: 用語集 --- -- **The Graph**。データのインデックス(索引付け)とクエリ(問い合わせ)のための分散型プロトコル。 +- **The Graph**: A decentralized protocol for indexing and querying data. -- **クエリ**:データに対する要求。The Graphの場合、クエリとは、インデクサーが回答するサブグラフのデータに対するリクエストのことです。 +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**:API用のクエリ言語であり、既存のデータでクエリを実行するためのランタイムです。グラフは、サブグラフのクエリにGraphQLを使用しています。 +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **エンドポイント**: サブグラフのクエリに使用できる URL。 Subgraph Studio のテスト エンドポイントは `https://api.studio.thegraph.com/query///` であり、Graph Explorer エンドポイントは `https: //gateway.thegraph.com/api//subgraphs/id/`. Graph Explorer エンドポイントは、The Graph の分散型ネットワーク上のサブグラフをクエリするために使用されます。 +- **Endpoint**: A URL that can be used to query a subgraph. 
The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. -- **インデクサー報酬**:GRTでは、インデクサー報酬は、クエリ料金のリベートとインデックスの報酬の2つの要素で成り立っています。 +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **クエリ料**:ネットワーク上でクエリを提供するサブグラフ消費者から支払われます。 + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **インデックス作成報酬**:インデクサーが、サブグラフのインデックス作成に対して受け取る報酬です。インデックス報酬は、毎年3%のGRTを新規に発行することで発生します。 + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **デリゲーション・タックス**。デリゲーターがインデクサーにGRTを委任する際に支払う0.5%の手数料です。手数料の支払いに使われたGRTはバーンされます。 +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. - **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **キュレーション税**。キュレーターがサブグラフにGRTのシグナルを送る際に支払う1%の手数料。手数料を支払うために使用されるGRTはバーンされます。 +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. - **Data Consumer**: Any application or user that queries a subgraph. -- **サブグラフ・デベロッパー**:The Graphの分散型ネットワークにサブグラフを構築し、デプロイする開発者のことです。 +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. 
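The Endpoint entry above gives the two URL shapes for querying a subgraph. As an illustration only (not part of the patch), a hedged TypeScript sketch of a query against the Graph Explorer endpoint follows; `<api-key>` and `<subgraph-id>` are placeholders, and the `tokens` entity is hypothetical, since the real fields depend on the subgraph's schema:

```typescript
// Illustrative sketch only, not part of the patch. The URL shape mirrors the
// glossary's Graph Explorer endpoint; placeholders must be replaced with real
// values, and the queried entity is an assumption for the example.
const endpoint =
  "https://gateway.thegraph.com/api/<api-key>/subgraphs/id/<subgraph-id>";

const query = /* GraphQL */ `
  {
    tokens(first: 5) {
      id
    }
  }
`;

async function querySubgraph(): Promise<unknown> {
  const res = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  return res.json();
}

querySubgraph().then(console.log).catch(console.error);
```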
-- **エポック**: ネットワーク内の時間の単位。現在、1エポックは6,646ブロック、または約1日です。 +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **アクティブ**:アロケーションは、オンチェーンで作成されたときにアクティブとみなされます。これはアロケーションを開くと呼ばれ、インデクサーが特定のサブグラフのために積極的にインデックスを作成し、クエリを提供していることをネットワークに示しています。アクティブなアロケーションは、サブグラフ上のシグナルと割り当てられたGRTの量に比例してインデックス作成報酬を発生させます。 + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**:サブグラフの構築、デプロイ、公開のための強力なDAPです。 +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **アービトレーター(仲裁人)**: 仲裁人は、ガバナンスプロセスを通じて任命されるネットワーク参加者です。仲裁人の役割は、インデックス作成とクエリの論争の結果を決定することです。その目的は、グラフネットワークの実用性と信頼性を最大化することです。 +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **インデックス作成報酬**:インデクサーがサブグラフのインデックス作成に対して受け取る報酬です。インデックス作成報酬はGRTで分配されます。 +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **デリゲーション報酬**:GRTをインデクサーにデリゲートすることでデリゲーターが受け取る報酬。デリゲーション報酬はGRTで分配されます。 +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. 
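The Epoch and Allocation entries above state that one epoch is currently 6,646 blocks (roughly one day) and that an allocation left open beyond 28 epochs is stale. A minimal TypeScript sketch of that bookkeeping, illustrative only and not part of the patch, with invented helper names:

```typescript
// Illustrative sketch only, not part of the patch. Figures come from the glossary:
// one epoch is currently 6,646 blocks (~1 day); an allocation left open beyond
// 28 epochs is considered stale.
const BLOCKS_PER_EPOCH = 6_646;
const MAX_ALLOCATION_EPOCHS = 28;

function isAllocationStale(openedAtEpoch: number, currentEpoch: number): boolean {
  return currentEpoch - openedAtEpoch > MAX_ALLOCATION_EPOCHS;
}

// Upper bound of an allocation's lifetime, in blocks: 28 * 6,646 = 186,088.
const maxAllocationBlocks = MAX_ALLOCATION_EPOCHS * BLOCKS_PER_EPOCH;

console.log(maxAllocationBlocks, isAllocationStale(100, 130)); // 186088 true
```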
-- **GRT**: Graphのワークユーティリティトークン。GRTは、ネットワーク参加者にネットワークへの貢献に対する経済的インセンティブを提供します。 +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **グラフクライアント**:GraphQLベースのDappsを分散的に構築するためのライブラリです。 +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **グラフエクスプローラ**:ネットワーク参加者がサブグラフを探索し、プロトコルと対話するために設計されたdappです。 +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **グラフCLI**:The Graphを構築し、デプロイするためのコマンドラインインターフェースツールです。 +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **クールダウン期間**:デリゲーションパラメータを変更したインデクサが再度デリゲーションできるようになるまでの残り時間。 +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. - **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **移行**:サブグラフの古いバージョンから新しいバージョンに移行するキュレーション共有のプロセスです(例えば、v0.0.1がv0.0.2に更新される場合)。 +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). From f54b451d756508c266bbf4d6122ac95164ade98e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:57 -0500 Subject: [PATCH 0443/1534] New translations glossary.mdx (Korean) --- website/src/pages/ko/resources/glossary.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ko/resources/glossary.mdx b/website/src/pages/ko/resources/glossary.mdx index bdeff7281023..ffcd4bca2eed 100644 --- a/website/src/pages/ko/resources/glossary.mdx +++ b/website/src/pages/ko/resources/glossary.mdx @@ -22,6 +22,8 @@ title: Glossary - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. 
For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -42,7 +44,7 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: Glossary - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. 
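The Delegation Capacity entry added in these hunks fixes the ratio at 16x self-stake and walks through the 1M to 16M example. A one-function TypeScript sketch of that calculation, illustrative only and not part of the patch:

```typescript
// Illustrative sketch only, not part of the patch. The 16x ratio and the
// 1M -> 16M worked example both come from the Delegation Capacity entry.
const DELEGATION_RATIO = 16;

function delegationCapacity(selfStakeGrt: number): number {
  return selfStakeGrt * DELEGATION_RATIO;
}

console.log(delegationCapacity(1_000_000)); // 16000000, matching the glossary example
```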
From cf3cc881a69496b51317538ca17731ae1f460a53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:58 -0500 Subject: [PATCH 0444/1534] New translations glossary.mdx (Dutch) --- website/src/pages/nl/resources/glossary.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/src/pages/nl/resources/glossary.mdx b/website/src/pages/nl/resources/glossary.mdx index bdeff7281023..ffcd4bca2eed 100644 --- a/website/src/pages/nl/resources/glossary.mdx +++ b/website/src/pages/nl/resources/glossary.mdx @@ -22,6 +22,8 @@ title: Glossary - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -42,7 +44,7 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: Glossary - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. 
It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. From d55c3b9d66b2140f15eb4cc0510762f8176ef3c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:51:59 -0500 Subject: [PATCH 0445/1534] New translations glossary.mdx (Polish) --- website/src/pages/pl/resources/glossary.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/src/pages/pl/resources/glossary.mdx b/website/src/pages/pl/resources/glossary.mdx index bdeff7281023..ffcd4bca2eed 100644 --- a/website/src/pages/pl/resources/glossary.mdx +++ b/website/src/pages/pl/resources/glossary.mdx @@ -22,6 +22,8 @@ title: Glossary - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -42,7 +44,7 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. 
When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: Glossary - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. From bbf2993ad3a8cc2abcad2afe78a54d80e88be933 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:00 -0500 Subject: [PATCH 0446/1534] New translations glossary.mdx (Portuguese) --- website/src/pages/pt/resources/glossary.mdx | 68 +++++++++++---------- 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/website/src/pages/pt/resources/glossary.mdx b/website/src/pages/pt/resources/glossary.mdx index e7442f46ccba..4660c4d00ecf 100644 --- a/website/src/pages/pt/resources/glossary.mdx +++ b/website/src/pages/pt/resources/glossary.mdx @@ -2,57 +2,59 @@ title: Glossário --- -- **The Graph**: Um protocolo descentralizado para a indexação e consulta de dados. +- **The Graph:** Um protocolo descentralizado para indexação e query de dados. -- **Query**: Um pedido de dados. No caso do The Graph, um query é um pedido por dados de um subgraph que será respondido por um Indexador. +- **Query:** Uma solicitação de dados. No The Graph, um query é uma solicitação por dados de um subgraph que será respondida por um Indexador. -- **GraphQL**: Uma linguagem de query para APIs e tempo de execução para cumprir estes queries com os seus dados existentes. O TheGraph usa o GraphQL para consultar subgraphs. +- **GraphQL:** Uma linguagem de queries para APIs e um runtime (programa de execução) para realizar esses queries com os dados existentes. O The Graph usa a GraphQL para fazer queries de subgraphs. -- **Endpoint** (ponto final): Um URL para consultar um subgraph. O endpoint de testes para o Subgraph Studio é `https://api.studio.thegraph.com/query///` e o endpoint do Graph Explorer é `https://gateway.thegraph.com/api//subgraphs/id/`. O endpoint do Graph Explorer é utilizado para consultar subgraphs na rede descentralizada do The Graph. +- **Endpoint**: Um URL que pode ser usado para fazer queries. O ponto final de execução para o Subgraph Studio é `https://api.studio.thegraph.com/query///`, e o do Graph Explorer é `https://gateway.thegraph.com/api//subgraphs/id/`. O ponto final do Graph Explorer é usado para fazer queries de subgraphs na rede descentralizada do The Graph. -- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. +- **Subgraph:** Uma API aberta que extrai, processa, e guarda dados de uma blockchain para facilitar queries via a GraphQL. 
Os programadores podem construir, lançar, e editar subgraphs na The Graph Network. Indexado, o subgraph está sujeito a queries por quem quiser solicitar. -- **Indexador**: Um participante da rede que executa nodes de indexação para indexar dados de blockchains e servir queries da GraphQL. -- **Fluxos de Receita de Indexadores:** Os Indexadores são recompensados em GRT com dois componentes: Rebates de taxa de query e recompensas de indexação. - 1. **Rebates de Taxa de Query**: Pagamentos de consumidores de subgraphs por servir queries na rede. - 2. **Recompensas de Indexação**: São recebidas por Indexadores por indexar subgraphs, e geradas via a emissão anual de 3% de GRT. - **Auto-Stake (Stake Próprio) do Indexador**: A quantia de GRT que os Indexadores usam para participar na rede descentralizada. A quantia mínima é 100.000 GRT, e não há limite máximo. - **Capacidade de Delegação**: A quantia máxima de GRT que um Indexador pode aceitar dos Delegantes. Os Indexadores só podem aceitar até 16 vezes o seu Auto-Stake, e mais delegações resultam em recompensas diluídas. Por exemplo: se um Indexador tem um Auto-Stake de 1 milhão de GRT, a capacidade de delegação é 16 milhões. Porém, os Indexadores só podem aumentar a sua Capacidade de Delegação se aumentarem também o seu Auto-Stake. - **Indexador de Atualizações**: Um Indexador de reserva para queries não servidos por outros Indexadores na rede. Este Indexador não compete com outros Indexadores. - **Delegante:** Um participante da rede que possui GRT e delega uma quantia para Indexadores, permitindo que esses aumentem o seu stake em subgraphs. Em retorno, os Delegantes recebem uma porção das Recompensas de Indexação recebidas pelos Indexadores por processar subgraphs. - **Taxa de Delegação**: Uma taxa de 0,5% paga por Delegantes quando delegam GRT a Indexadores. O GRT usado para pagar a taxa é queimado.
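The Taxa de Delegação entry above (0.5% delegation tax, paid in GRT that is burned) implies that only the remainder of a delegation reaches the Indexer. A minimal TypeScript sketch of that split, illustrative only and not part of the patch; the 10,000 GRT figure is an invented sample:

```typescript
// Illustrative sketch only, not part of the patch. Per the glossary, a 0.5%
// delegation tax is burned, so only the remainder of the delegated GRT counts.
const DELEGATION_TAX = 0.005;

function delegate(amountGrt: number) {
  const burned = amountGrt * DELEGATION_TAX;
  return { burned, delegated: amountGrt - burned };
}

// Delegating a hypothetical 10,000 GRT burns 50 GRT and delegates 9,950 GRT.
console.log(delegate(10_000));
```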
-- **Taxa de Curadoria**: Uma taxa de 1% paga pelos Curadores quando sinalizam GRT em subgraphs. O GRT usado para pagar a taxa é queimado. +- **Curador:** Um participante da rede que identifica subgraphs de qualidade e sinaliza GRT para eles em troca de ações de curadoria. Quando os Indexadores resgatam as taxas de query em um subgraph, 10% é distribuído para os Curadores desse subgraph. Há uma correlação positiva entre a quantia de GRT sinalizada e o número de Indexadores a indexar um subgraph. -- **Data Consumer**: Any application or user that queries a subgraph. +- \*\*Taxa de Curadoria: Uma taxa de 1% paga pelos Curadores quando sinalizam GRT em subgraphs. O GRT usado para pagar a taxa é queimado. -- **Programador de Subgraph**: Um programador que constrói e lança um subgraph à rede descentralizada do The Graph. +- Consumidor de Dados: Qualquer aplicativo ou utilizador que faz queries para um subgraph. -- **Manifest de Subgraph**: Um arquivo YAML que descreve o schema GraphQL, as fontes de dados, e outros metadados. [Veja um exemplo](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml). +- \*\*Programador de Subgraph: Um programador que constrói e lança um subgraph à rede descentralizada do The Graph. -- **Epoch**: Uma unidade de tempo na rede. Um epoch atualmente dura 6.646 blocos, ou cerca de um dia. +- **Manifest de Subgraph:** Um arquivo YAML que descreve o schema, fontes de dados, e outros metadados de um subgraph. [Veja um exemplo](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml). -- **Alocação**: Um indexador pode alocar o seu stake total em GRT (incluindo o stake dos Delegantes) em subgraphs editados na rede descentralizada do The Graph. As alocações podem ter estados diferentes: +- **Epoch:** Uma unidade de tempo na rede. Um epoch atualmente dura 6.646 blocos, ou cerca de um dia. - 1. **Ativa**: Uma alocação é considerada ativa quando é criada on-chain. Isto se chama abrir de uma alocação, e indica à rede que o Indexador está a indexar e servir consultas ativamente para um subgraph particular. Alocações ativas acumulam recompensas de indexação proporcionais ao sinal no subgraph, e à quantidade de GRT alocada. +- \*\*Alocação: Um Indexador pode alocar o seu stake total em GRT (incluindo o stake dos Delegantes) em subgraphs editados na rede descentralizada do The Graph. As alocações podem ter estados diferentes: - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. + 1. **Ativa:** Uma alocação é considerada ativa quando é criada on-chain. Isto se chama abrir uma alocação, e indica à rede que o Indexador está a indexar e servir consultas ativamente para um subgraph particular. Alocações ativas acumulam recompensas de indexação proporcionais ao sinal no subgraph, e à quantidade de GRT alocada. -- **Subgraph Studio**: um dApp poderoso para a construção, lançamento e edição de subgraphs. + 2. **Fechada**: Um Indexador pode resgatar as recompensas acumuladas em um subgraph selecionado ao enviar uma Prova de Indexação (POI) recente e válida. 
Isto se chama "fechar uma alocação". Uma alocação deve ter ficado aberta por, no mínimo, um epoch antes que possa ser fechada. O período máximo de alocação é de 28 epochs; se um indexador deixar uma alocação aberta por mais que isso, ela se torna uma alocação obsoleta. Quando uma alocação está **Fechada**, um Pescador ainda pode abrir uma disputa contra um Indexador por servir dados falsos. - **Subgraph Studio**: um dApp (aplicativo descentralizado) poderoso para a construção, lançamento e edição de subgraphs. - **Pescadores**: Um papel na Graph Network cumprido por participantes que monitoram a precisão e integridade dos dados servidos pelos Indexadores. Quando um Pescador identifica uma resposta de query ou uma POI que acreditam ser incorreta, ele pode iniciar uma disputa contra o Indexador. Se a disputa der um veredito a favor do Pescador, o Indexador é cortado, ou seja, perderá 2.5% do seu auto-stake de GRT. Desta quantidade, 50% é dado ao Pescador como recompensa pela sua vigilância, e os 50% restantes são retirados da circulação (queimados). Este mecanismo é desenhado para encorajar Pescadores a ajudar a manter a confiança na rede ao garantir que Indexadores sejam responsabilizados pelos dados que providenciam. - **Árbitros**: Participantes da rede apontados por um processo de governança. O Árbitro decide o resultado de disputas de indexação e consultas, e a sua meta é maximizar a utilidade e confiança da Graph Network. + +- **Corte**: Os Indexadores podem tomar cortes no seu self-stake de GRT por fornecer uma prova de indexação (POI) incorreta ou servir dados imprecisos. A percentagem de corte é um parâmetro do protocolo, atualmente configurado em 2,5% do auto-stake de um Indexador. 50% do GRT cortado vai ao Pescador que disputou os dados ou POI incorretos. Os outros 50% são queimados. - **Recompensas de Indexação**: As recompensas que os Indexadores recebem por indexar subgraphs, distribuídas em GRT. @@ -60,11 +62,11 @@ title: Glossário - **GRT**: O token de utilidade do The Graph, que oferece incentivos económicos a participantes da rede por contribuir. -- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI.
Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI (Prova de Indexação)**: Quando um Indexador fecha a sua alocação e quer resgatar as suas recompensas de indexação acumuladas em um certo subgraph, ele deve apresentar uma Prova de Indexação (POI) válida e recente. Os Pescadores podem disputar a POI providenciada por um Indexador; disputas resolvidas a favor do Pescador causam um corte para o Indexador. - **Graph Node**: O componente que indexa subgraphs e disponibiliza os dados resultantes abertos a queries através de uma API GraphQL. Assim, ele é essencial ao stack de indexadores, e operações corretas de um Graph Node são cruciais para executar um indexador com êxito. - **Agente de Indexador**: Parte do stack do indexador. Ele facilita as interações do Indexer on-chain, inclusive registos na rede, gestão de lançamentos de Subgraph ao(s) seu(s) Graph Node(s), e gestão de alocações. - **The Graph Client**: Uma biblioteca para construir dApps baseados em GraphQL de maneira descentralizada. - **Graph Explorer**: Um dApp desenhado para que os participantes da rede explorem subgraphs e interajam com o protocolo. - **Graph CLI**: Uma ferramenta de interface de comando de linha para construções e lançamentos no The Graph. - **Período de Recarga**: O tempo restante até que um Indexador que mudou os seus parâmetros de delegação possa fazê-lo novamente. - **Ferramentas de Transferência para L2**: Contratos inteligentes e interfaces que permitem que os participantes na rede transfiram ativos relacionados à rede da mainnet da Ethereum ao Arbitrum One. Os participantes podem transferir GRT delegado, subgraphs, ações de curadoria, e o Auto-Stake do Indexador. - **Atualização de um subgraph**: O processo de lançar uma nova versão de subgraph com atualizações ao manifest, schema e mapeamentos do subgraph. - **Migração**: O processo de movimentar ações de curadoria da versão antiga de um subgraph à versão nova do mesmo (por ex., quando a v.0.0.1 é atualizada à v.0.0.2).
From 37ceccd3c564d34e309fdf17d1caf7c7ed0b3fb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:01 -0500 Subject: [PATCH 0447/1534] New translations glossary.mdx (Russian) --- website/src/pages/ru/resources/glossary.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ru/resources/glossary.mdx b/website/src/pages/ru/resources/glossary.mdx index bdeff7281023..ffcd4bca2eed 100644 --- a/website/src/pages/ru/resources/glossary.mdx +++ b/website/src/pages/ru/resources/glossary.mdx @@ -22,6 +22,8 @@ title: Glossary - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -42,7 +44,7 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: Glossary - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. 
It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. From 77652b4da53c27e361f781fae6d521da0f7c42ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:02 -0500 Subject: [PATCH 0448/1534] New translations glossary.mdx (Swedish) --- website/src/pages/sv/resources/glossary.mdx | 28 +++++++++++---------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/website/src/pages/sv/resources/glossary.mdx b/website/src/pages/sv/resources/glossary.mdx index fffe4b8e1ef7..dd930819456b 100644 --- a/website/src/pages/sv/resources/glossary.mdx +++ b/website/src/pages/sv/resources/glossary.mdx @@ -2,11 +2,11 @@ title: Ordlista --- -- **The Graf**: En decentraliserad protokoll för indexering och frågning av data. +- **The Graph**: A decentralized protocol for indexing and querying data. - **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**: Ett frågespråk för API:er och en körningsmiljö för att uppfylla dessa frågor med befintlig data. The Graf använder GraphQL för att fråga subgrafer. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. - **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. @@ -22,11 +22,13 @@ title: Ordlista - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **Delegationsavgift **: En avgift på 0,5% som betalas av Delegatorer när de delegerar GRT till Indexers. Det GRT som används för att betala avgiften bränns. +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. - **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. 
When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. @@ -42,11 +44,11 @@ title: Ordlista - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Aktiv**: En allokering anses vara aktiv när den skapas på kedjan. Detta kallas att öppna en allokering och indikerar för nätverket att Indexer aktivt indexerar och betjänar frågor för en särskild subgraf. Aktiva allokeringar ackumulerar indexbelöningar proportionellt mot signalen på subgrafen och mängden GRT som allokerats. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **SubGraf Studio**: En kraftfull dapp för att bygga, distribuera och publicera subgrafer. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -54,25 +56,25 @@ title: Ordlista - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexbelöningar**: De belöningar som Indexers får för att indexera subgrafer. Indexbelöningar distribueras i GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **Delegationsbelöningar**: De belöningar som Delegatorer får för att delegera GRT till Indexers. Delegationsbelöningar distribueras i GRT. +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. -- **GRT**: The Graph's arbetsnytto-token. 
GRT tillhandahåller ekonomiska incitament för nätverksdeltagare att bidra till nätverket. +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **The Graph Klient**: Ett bibliotek för att bygga decentraliserade dappar baserade på GraphQL. +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graf Explorer**: En dapp utformad för nätverksdeltagare att utforska subgrafer och interagera med protokollet. +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **Graf CLI**: Ett kommandoradsgränssnitt för att bygga och distribuera till The Graph. +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **Cooldown-period**: Den återstående tiden tills en Indexer som ändrade sina delegationsparametrar kan göra det igen. +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. From 205e76c93e58a44ea02024c53b5f99b1883f7b1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:03 -0500 Subject: [PATCH 0449/1534] New translations glossary.mdx (Turkish) --- website/src/pages/tr/resources/glossary.mdx | 34 +++++++++++---------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/website/src/pages/tr/resources/glossary.mdx b/website/src/pages/tr/resources/glossary.mdx index 9c7b1781c257..ffcd4bca2eed 100644 --- a/website/src/pages/tr/resources/glossary.mdx +++ b/website/src/pages/tr/resources/glossary.mdx @@ -8,7 +8,7 @@ title: Glossary - **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **Uç Nokta**: Bir subgraph'ı sorgulamak için kullanılabilecek bir URL'dir. Subgraph Stüdyo için test uç noktası `https://api.studio.thegraph.com/query///` ve Graph Gezgini uç noktası `https://gateway.thegraph.com/api//subgraphs/id/` şeklindedir. Graph Gezgini uç noktası, Graph'ın merkeziyetsiz ağındaki subgraphları sorgulamak için kullanılır. 
+- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. @@ -16,25 +16,27 @@ title: Glossary - **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Sorgu Ücreti İadeleri**: Ağda sorgular sunmak için subgraph tüketicilerinden yapılan ödemelerdir. + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **İndeksleme Ödülleri**: İndeksleyicilerin subgraph'leri indeksleme karşılığında aldığı ödüller. İndeksleme ödülleri, yıllık %3 GRT'nin yeni ihracı yoluyla oluşturulur. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **Delegasyon Vergisi**: GRT'yi indeksleyicilere stake ettiklerinde delegatörler tarafından ödenen %0,5'lik bir ücret. Ücreti ödemek için kullanılan GRT yakılır. +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. - **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **Kurasyon Vergisi**: Küratörler tarafından subgraph'lerde GRT sinyali verildiğinde ödenen %1'lik bir ücrettir. Ücreti ödemek için kullanılan GRT yakılır. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. - **Data Consumer**: Any application or user that queries a subgraph. -- **Subgraph Geliştiricisi**: Graph'in merkeziyetsiz ağına bir subgraph inşa eden ve dağıtan bir geliştirici. +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. 
- **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. @@ -42,11 +44,11 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Aktif**: Bir tahsis, zincir üzerinde oluşturulduğunda aktif kabul edilir. Buna tahsis açma denir ve ağa, indeksleyicinin belirli bir subgraph için sorguları aktif olarak indekslediğini ve sunduğunu gösterir. Aktif tahsisler, subgraph'teki sinyal ve tahsis edilen GRT miktarı ile orantılı olarak indeksleme ödülleri tahakkuk ettirir. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Stüdyo**: Subgraph'ler oluşturmak, deploy etmek ve yayınlamak için güçlü bir merkeziyetsiz uygulamadır. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -54,25 +56,25 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **İndeksleme Ödülleri**: İndeksleyicilerin subgraph'leri indeksleme karşılığında aldığı ödüllerdir. İndeksleme ödülleri GRT şeklinde dağıtılır. +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **Stake Ödülleri**: Delegatörlerin GRT'yi indeksleyicilere stake etme karşılığında aldığı ödüllerdir. Stake ödülleri GRT şeklinde dağıtılır. +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. 
-- **GRT**: Graph'in çalışma yardımcı programı belirtecidir. GRT, ağ katılımcılarına ağa katkıda bulunmaları için ekonomik teşvikler sağlar. +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **Graph Tüketicileri**: Merkeziyetsiz bir şekilde GraphQL tabanlı merkeziyetsiz uygulamalar inşa etmeye yönelik bir kitaplık. +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Gezgini**: Ağ katılımcılarının subgraph'leri keşfetmesi ve protokolle etkileşim kurması için tasarlanmış bir merkeziyetsiz uygulamadır. +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **Graph CLI**: Graph üzerinde inşa ve deploy etmek için bir komut satırı arabirim aracı. +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **Soğuma Süresi**: Yetki parametrelerini değiştiren indeksleyicinin bunu tekrar yapabilmesi için kalan süre. +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. From 156e368af8124d248a5b17111ecda4c12348ef26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:04 -0500 Subject: [PATCH 0450/1534] New translations glossary.mdx (Ukrainian) --- website/src/pages/uk/resources/glossary.mdx | 42 +++++++++++---------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/website/src/pages/uk/resources/glossary.mdx b/website/src/pages/uk/resources/glossary.mdx index 162a88d424e0..1338f2ba16ba 100644 --- a/website/src/pages/uk/resources/glossary.mdx +++ b/website/src/pages/uk/resources/glossary.mdx @@ -2,39 +2,41 @@ title: Глосарій --- -- **The Graph**: Децентралізований протокол для індексації та запитів до даних. +- **The Graph**: A decentralized protocol for indexing and querying data. -- **Запит**: Запит на отримання даних. У випадку The Graph, запит - це запит на дані з підграфа, на який відповість Індексатор. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. 
-- **GraphQL**: Мова запитів для API та середовище реалізації для виконання цих запитів з наявними у вас даними. The Graph використовує GraphQL для запитів до підграфів. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **Кінцева точка**: URL-адреса, яку можна використовувати для запиту підграфа. Кінцевою точкою тестування для Subgraph Studio є `https://api.studio.thegraph.com/query///`, а кінцевою точкою для Graph Explorer є `https://gateway.thegraph.com/api//subgraphs/id/`. Кінцева точка Graph Explorer використовується для запиту підграфів у децентралізованій мережі The Graph. +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. -- **Indexer Revenue Streams**: Винагорода індексаторам в GRT складається з двох компонентів: певна комісія за запити (query fee rebates) та винагорода за індексацію (indexing rewards). +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: Надходження від користувачів підграфів за обслуговування запитів у мережі. + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: Винагорода, яку отримують індексатори за індексування підграфів. Винагорода за індексацію формується шляхом випуску нової емісії 3% GRT щорічно. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **Delegation Tax**: Комісія у розмірі 0.5% сплачується делегатами, коли вони делегують власні GRT індексаторам. GRT, який використовувався для сплати цієї комісії, спалюється. +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. 
The GRT used to pay the fee is burned. - **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **Curation Tax**: Комісія у розмірі 1%, яку сплачують куратори, коли подають сигнал в токенах GRT на підграфи. Відповідно GRT, що використовується для сплати цієї комісії, спалюється. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. - **Data Consumer**: Any application or user that queries a subgraph. -- **Розробник підграфа**: Розробник, який створює та розгортає підграф у децентралізованій мережі The Graph. +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. @@ -42,11 +44,11 @@ title: Глосарій - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Активний**: Розподіл вважається активним, коли він створюється всередині мережі. Це називається відкриттям розподілу і вказує мережі на те, що індексатор активно індексує та обслуговує запити для конкретного підграфа. При активному розподілі нараховується винагорода за індексацію пропорційно до кількості сигналів на підграфі та суми розподілених GRT токенів. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: Потужний додаток для створення, розгортання та публікації підграфів. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). 
This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -54,25 +56,25 @@ title: Глосарій - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Нагороди за індексацію**: Винагорода, яку отримують Індексатори за індексування підграфів. Винагороди за індексацію розподіляються в токенах GRT. +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **Нагороди за делегацію**: Винагороди, які отримують Делегати за делегування GRT Індексаторам. Винагорода за делегування розподіляється в токенах GRT. +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. -- **GRT**: Функціональний токен екосистеми The Graph. GRT надає економічні заохочення учасникам мережі за їх внесок у її розвиток. +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **Клієнт The Graph**: Бібліотека для децентралізованого створення додатків на основі GraphQL. +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: Додаток, призначений для користувачів мережі, щоб переглядати підграфи та взаємодіяти з протоколом. +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **Graph CLI**: Інструмент інтерфейсу командного рядка для побудови та розгортання в мережі The Graph. +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **Період очікування**: Час, що залишився до того моменту, як індексатор, який змінив параметри делегування, зможе зробити це знову. +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. 
Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. From d4ec5e5174d623729aed0dc0093466fb226c1814 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:05 -0500 Subject: [PATCH 0451/1534] New translations glossary.mdx (Chinese Simplified) --- website/src/pages/zh/resources/glossary.mdx | 46 +++++++++++---------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/website/src/pages/zh/resources/glossary.mdx b/website/src/pages/zh/resources/glossary.mdx index 99305cea1b15..98e473e0a8ae 100644 --- a/website/src/pages/zh/resources/glossary.mdx +++ b/website/src/pages/zh/resources/glossary.mdx @@ -2,77 +2,79 @@ title: 术语汇编 --- -- **Graph**: 用于索引和查询数据的去中心化协议。 +- **The Graph**: A decentralized protocol for indexing and querying data. -- **Query**: 对数据的请求。对于Graph,查询是从子图中请求数据,并由索引人回答。 +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**: 用于 API 的查询语言,以及用现有数据实现这些查询的运行。Graph 使用 GraphQL 查询子图。 +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **Endpoint**: 可以用来查询子图的URL。Subgraph Studio的测试端点是`https://api.studio.thegraph.com/query///`,Graph浏览器端点`为https://gateway.thegraph.com/api//subgraphs/id/`。Graph浏览器端点用于查询Graph的去中心化网络上的子图。 +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. -- **Indexer Revenue Streams**:索引人在 GRT 中获得两个组成部分: 查询费用回扣和索引奖励。 +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **Query Fee Rebates**: 子图消费者为网络上的查询提供服务支付的费用。 + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **Indexing Rewards**: 索引人因为索引子图而获得的奖励。索引奖励是通过每年发行3% 的 GRT 来产生的。 + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. 
This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **Delegation Tax**: 委托人将 GRT 委托给索引人时支付的0.5% 的费用。用于支付费用的 GRT 将被消耗。 +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. - **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **Curation Tax**: 当策展人在子图上显示 GRT 时,他们要支付1% 的费用。用于支付费用的 GRT 将被消耗。 +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. - **Data Consumer**: Any application or user that queries a subgraph. -- **Subgraph Developer**: 构建并部署子图到 Graph 去中心化网络的开发人员。 +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. -- **Epoch**: 网络中的时间单位。一个时期目前为6,646个区块或大约1天。 +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: 分配在链上创建时被认为是活动的。这称为打开一个分配,并向网络表明索引人正在为特定子图建立索引并提供查询服务。主动分配的增值索引奖励与子图上的信号以及分配的 GRT 的数量成比例。 + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: 用于构建、部署和发布子图的强大 dapp。 +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). 
This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: 仲裁员是通过治理设置的网络参与者。仲裁员的作用是决定索引和查询争议的结果。他们的目标是最大限度地提高Graph网络的效用和可靠性。 +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: 索引人因为索引子图而获得的奖励。索引奖励是通过GRT 来分配的。 +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **Delegation Rewards**: 委托人将 GRT 委托给索引人所获得的奖励。委托奖励以 GRT 的形式分配。 +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. -- **GRT**: Graph的工作效用代币。 GRT 为网络参与者提供经济激励,鼓励他们为网络做出贡献。 +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **The Graph Client**: 用于以去中心化方式构建基于 GraphQL 的 dapps 的库。 +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: 为网络参与者探索子图并与协议交互而设计的 dapp。 +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **Graph CLI**: 用于构建和部署到 Graph 的命令行界面工具。 +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **Cooldown Period**: 直到更改其委托参数的索引人可以再次进行此操作之前的剩余时间。 +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. 
From 5a8ba88f912e7143acfabeb4be8a04c57323a21e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:06 -0500 Subject: [PATCH 0452/1534] New translations glossary.mdx (Urdu (Pakistan)) --- website/src/pages/ur/resources/glossary.mdx | 42 +++++++++++---------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/website/src/pages/ur/resources/glossary.mdx b/website/src/pages/ur/resources/glossary.mdx index 1b58dd720662..bece9e2db4ea 100644 --- a/website/src/pages/ur/resources/glossary.mdx +++ b/website/src/pages/ur/resources/glossary.mdx @@ -2,39 +2,41 @@ title: لغت --- -- **گراف**: ڈیٹا کی انڈیکسنگ اور کیوری کے لیے ایک ڈیسینٹرالائزڈ پروٹوکول. +- **The Graph**: A decentralized protocol for indexing and querying data. -- **کیوری**: ڈیٹا کی درخواست۔ گراف کی صورت میں، ایک کیوری سب گراف سے ڈیٹا کی درخواست ہے جس کا جواب انڈیکسر دے گا. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**: APIs کے لیے کیوری کی زبان اور آپ کے موجودہ ڈیٹا کے ساتھ ان کیوریز کو پورا کرنے کے لیے رن ٹائم۔ گراف سب گراف سے کیوری کرنے کے لیے GraphQL کا استعمال کرتا ہے. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **اینڈ پوائنٹ**: ایک URL جسے سب گراف سے کیوری کرنے کے لیے استعمال کیا جا سکتا ہے۔ سب گراف سٹوڈیو کے لیے ٹیسٹنگ اینڈ پوائنٹ ہے `https://api.studio.thegraph.com/query///` اور گراف ایکسپلورر اینڈ پوائنٹ ہے `https: //gateway.thegraph.com/api//subgraphs/id/`۔ گراف ایکسپلورر اینڈ پوائنٹ کا استعمال گراف کے ڈیسینٹرالائزڈ نیٹ ورک پر سب گراف کے بارے میں کیوری کرنے کے لیے کیا جاتا ہے. +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. -- **انڈیکسر کے آمدنی کے سلسلے** انڈیکسرز کو GRT میں دو اجزاء کے ساتھ انعام دیا جاتا ہے: کیوری کی فیس میں چھوٹ اور انڈیکسنگ کے انعامات. +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **کیوری فیس کی چھوٹ**: نیٹ ورک پر کیوریز پیش کرنے کے لیے سب گراف صارفین کی جانب سے ادائیگیاں. + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **انڈیکسنگ انعامات**: وہ انعامات جو انڈیکسرز کو انڈیکس کرنے والے سب گراف کے لیے موصول ہوتے ہیں۔انڈیکسنگ کے انعامات سالانہ 3% GRT کے نئے اجراء کے ذریعے بنائے جاتے ہیں. + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. 
Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **ڈیلیگیشن ٹیکس**: ڈیلیگیٹرز کی طرف سے 0.5% فیس ادا کی جاتی ہے جب وہ انڈیکسرز کو GRT تفویض کرتے ہیں۔ فیس کی ادائیگی کے لیے استعمال ہونے والی GRT کو جلا دیا جاتا ہے. +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. - **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **کیوریشن ٹیکس**: کیوریٹرز کے ذریعہ 1% فیس ادا کی جاتی ہے جب وہ سب گرافس پر GRT کا اشارہ دیتے ہیں۔ فیس کی ادائیگی کے لیے استعمال ہونے والی GRT کو جلا دیا جاتا ہے. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. - **Data Consumer**: Any application or user that queries a subgraph. -- **سب گراف ڈویلپر**: ایک ڈویلپر جو گراف کے ڈیسینٹرالائزڈ نیٹ ورک پر سب گراف بناتا اور تعینات کرتا ہے. +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. @@ -42,11 +44,11 @@ title: لغت - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **فعال**: ایک مختص کو فعال سمجھا جاتا ہے جب اسے آن چین بنایا جاتا ہے۔ اسے ایلوکیشن کھولنا کہا جاتا ہے، اور یہ نیٹ ورک کی طرف اشارہ کرتا ہے کہ انڈیکسر کسی خاص سب گراف کے لیے فعال طور پر انڈیکس کر رہا ہے اور کیوریز پیش کر رہا ہے۔ فعال مختصات سب گراف پر سگنل کے متناسب انڈیکسنگ انعامات اور مختص کردہ GRT کی رقم جمع کرتی ہیں. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. 
When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **سب گراف اسٹوڈیو**: سب گراف کی تعمیر، تعیناتی اور اشاعت کے لیے ایک طاقتور ڈیپ. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -54,25 +56,25 @@ title: لغت - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **انڈیکسنگ انعامات**: وہ انعامات جو انڈیکسرز کو انڈیکس کرنے والے سب گراف کے لیے موصول ہوتے ہیں۔ انڈیکسنگ کے انعامات GRT میں تقسیم کیے جاتے ہیں. +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **ڈیلیگیشن کے انعامات**: انڈیکسرز کو GRT تفویض کرنے پر ڈیلیگیٹرز کو ملنے والے انعامات۔ ڈیلیگیشن کے انعامات GRT میں تقسیم کیے جاتے ہیں. +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. -- **GRT**: گراف کے کام کا یوٹیلیٹی ٹوکن۔ GRT نیٹ ورک میں حصہ ڈالنے کے لیے نیٹ ورک کے شرکاء کو اقتصادی مراعات فراہم کرتا ہے. +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **گراف کلائنٹ**: ڈیسینٹرالائزڈ طریقے سے GraphQL پر مبنی ڈیپ بنانے کے لیے ایک لائبریری. +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **گراف ایکسپلورر**: ایک ڈیپ جو نیٹ ورک کے شرکاء کے لیے سب گراف کو دریافت کرنے اور پروٹوکول کے ساتھ تعامل کے لیے ڈیزائن کیا گیا ہے. 
+- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **گراف CLI**: گراف کی تعمیر اور تعیناتی کے لیے ایک کمانڈ لائن انٹرفیس ٹول. +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **کولڈاؤن کا دورانیہ**: ایک انڈیکسر جس نے اپنے ڈیلیگیشن پیرامیٹرز کو تبدیل کیا ہے اس وقت تک باقی وقت دوبارہ ایسا کر سکتا ہے. +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. From dfd8c11a80b43736d6ac74592afaf015206e457c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:07 -0500 Subject: [PATCH 0453/1534] New translations glossary.mdx (Vietnamese) --- website/src/pages/vi/resources/glossary.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/src/pages/vi/resources/glossary.mdx b/website/src/pages/vi/resources/glossary.mdx index bdeff7281023..ffcd4bca2eed 100644 --- a/website/src/pages/vi/resources/glossary.mdx +++ b/website/src/pages/vi/resources/glossary.mdx @@ -22,6 +22,8 @@ title: Glossary - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. @@ -42,7 +44,7 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. 
An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. @@ -64,7 +66,7 @@ title: Glossary - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. - **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. From a0e84f9d60c25ed9b262ca9147298e5d6f232df5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:08 -0500 Subject: [PATCH 0454/1534] New translations glossary.mdx (Marathi) --- website/src/pages/mr/resources/glossary.mdx | 36 +++++++++++---------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/website/src/pages/mr/resources/glossary.mdx b/website/src/pages/mr/resources/glossary.mdx index 71fd8619af45..ffcd4bca2eed 100644 --- a/website/src/pages/mr/resources/glossary.mdx +++ b/website/src/pages/mr/resources/glossary.mdx @@ -4,9 +4,9 @@ title: Glossary - **The Graph**: A decentralized protocol for indexing and querying data. -- **क्वेरी**: डेटासाठी विनंती. द ग्राफच्या बाबतीत, क्वेरी ही सबग्राफमधील डेटाची विनंती आहे ज्याचे उत्तर इंडेक्सरद्वारे दिले जाईल. +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**: API साठी क्वेरी भाषा आणि आपल्या विद्यमान डेटासह त्या क्वेरी पूर्ण करण्यासाठी रनटाइम. आलेख सबग्राफ क्वेरी करण्यासाठी GraphQL वापरतो. +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. - **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. @@ -14,27 +14,29 @@ title: Glossary - **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. -- **इंडेक्सर रेव्हेन्यू स्ट्रीम्स**: GRT मध्ये इंडेक्सर्सना दोन घटकांसह पुरस्कृत केले जाते: क्वेरी फी रिबेट्स आणि इंडेक्सिंग रिवॉर्ड्स. +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **इंडेक्सिंग रिवॉर्ड्स**: इंडेक्सर्सना अनुक्रमणिका सबग्राफसाठी प्राप्त होणारे पुरस्कार. इंडेक्सिंग रिवॉर्ड्स वार्षिक 3% GRT च्या नवीन जारी करून व्युत्पन्न केले जातात. + 2. 
**Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. - **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. + - **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. - **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **प्रतिनिधी कर**: प्रतिनिधींनी इंडेक्सर्सना GRT सोपवल्यावर 0.5% शुल्क. फी भरण्यासाठी वापरण्यात आलेला जीआरटी जळाला आहे. +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. - **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **क्युरेशन टॅक्स**: क्युरेटर्सने सबग्राफवर GRT सिग्नल केल्यावर 1% फी भरली जाते. फी भरण्यासाठी वापरण्यात आलेला जीआरटी जळाला आहे. +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. - **Data Consumer**: Any application or user that queries a subgraph. -- **सबग्राफ डेव्हलपर**: एक विकासक जो ग्राफच्या विकेंद्रीकृत नेटवर्कवर सबग्राफ तयार करतो आणि तैनात करतो. +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. @@ -42,11 +44,11 @@ title: Glossary - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **सक्रिय**: ऑन-चेन तयार केल्यावर वाटप सक्रिय मानले जाते. याला वाटप उघडणे म्हणतात, आणि नेटवर्कला सूचित करते की इंडेक्सर सक्रियपणे अनुक्रमित करत आहे आणि विशिष्ट सबग्राफसाठी क्वेरी सर्व्ह करत आहे. सक्रिय वाटप सबग्राफवरील सिग्नल आणि वाटप केलेल्या GRT रकमेच्या प्रमाणात अनुक्रमणिका बक्षिसे जमा करतात. + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. 2. 
**Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **सबग्राफ स्टुडिओ**: सबग्राफ तयार करणे, उपयोजित करणे आणि प्रकाशित करणे यासाठी एक शक्तिशाली डॅप. +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. - **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. @@ -54,25 +56,25 @@ title: Glossary - **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **इंडेक्सिंग रिवॉर्ड्स**: इंडेक्सर्सना अनुक्रमणिका सबग्राफसाठी प्राप्त होणारे पुरस्कार. इंडेक्सिंग रिवॉर्ड्स GRT मध्ये वितरीत केले जातात. +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **प्रतिनिधी पुरस्कार**: इंडेक्सर्सना GRT सोपवल्याबद्दल प्रतिनिधींना मिळणारे पुरस्कार. प्रतिनिधी पुरस्कार GRT मध्ये वितरित केले जातात. +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. -- **GRT**: आलेखाचे कार्य उपयुक्तता टोकन. GRT नेटवर्क सहभागींना नेटवर्कमध्ये योगदान देण्यासाठी आर्थिक प्रोत्साहन देते. +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. - **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer agent**: The Indexer agent is part of the Indexer stack. 
It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **द ग्राफ क्लायंट**: GraphQL-आधारित dapps विकेंद्रित पद्धतीने तयार करण्यासाठी लायब्ररी. +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **Graph Explorer**: नेटवर्क सहभागींसाठी सबग्राफ एक्सप्लोर करण्यासाठी आणि प्रोटोकॉलशी संवाद साधण्यासाठी डिझाइन केलेले dapp. +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **Graph CLI**: ग्राफ तयार करण्यासाठी आणि उपयोजित करण्यासाठी कमांड लाइन इंटरफेस साधन. +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **कूलडाउन कालावधी**: इंडेक्सर ज्याने त्यांचे प्रतिनिधीत्व पॅरामीटर्स बदलले आहेत तोपर्यंत तो पुन्हा करू शकतो. +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. - **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. From 7806b02e1db39fab4e0cc3177818f0248fe36c88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:09 -0500 Subject: [PATCH 0455/1534] New translations glossary.mdx (Hindi) --- website/src/pages/hi/resources/glossary.mdx | 70 +++++++++++---------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/website/src/pages/hi/resources/glossary.mdx b/website/src/pages/hi/resources/glossary.mdx index 244ef6f95cdd..d7c1fd85df2b 100644 --- a/website/src/pages/hi/resources/glossary.mdx +++ b/website/src/pages/hi/resources/glossary.mdx @@ -2,39 +2,41 @@ title: शब्दकोष --- -- **द ग्राफ़**: डेटा को इंडेक्स करने और क्वेरी करने के लिए एक विकेन्द्रीकृत प्रोटोकॉल। +- **The Graph**: A decentralized protocol for indexing and querying data. -- **क्वेरी**: डेटा के लिए एक अनुरोध। द ग्राफ़ के मामले में, एक क्वेरी एक सबग्राफ से डेटा के लिए एक अनुरोध है जिसका उत्तर एक इंडेक्सर द्वारा दिया जाएगा। +- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **ग्राफ़क्यूएल**: एपीआई के लिए एक क्वेरी भाषा और आपके मौजूदा डेटा के साथ उन क्वेरीज़ को पूरा करने के लिए एक रनटाइम। ग्राफ सबग्राफ को क्वेरी करने के लिए ग्राफक्लाइन का उपयोग करता है। +- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. -- **समाप्ति बिंदु**: एक URL जिसका उपयोग किसी सबग्राफ को क्वेरी करने के लिए किया जा सकता है। सबग्राफ स्टूडियो के लिए टेस्टिंग एंडपॉइंट `https://api.studio.thegraph.com/query///` है और ग्राफ एक्सप्लोरर एंडपॉइंट `https है: //gateway.thegraph.com/api//subgraphs/id/`। ग्राफ़ एक्सप्लोरर समापन बिंदु का उपयोग ग्राफ़ के विकेन्द्रीकृत नेटवर्क पर उप-अनुच्छेदों को क्वेरी करने के लिए किया जाता है। +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. 
-- **Subgraph**: एक ओपन एपीआई जो ब्लॉकचेन से डेटा निकालती है, उसे प्रोसेस करती है, और इसे इस तरह से स्टोर करती है कि इसे GraphQL के माध्यम से आसानी से क्वेरी किया जा सके। डेवलपर्स subgraphs बना, तैनात और The Graph Network पर प्रकाशित कर सकते हैं। एक बार इंडेक्स होने के बाद, subgraph को कोई भी क्वेरी कर सकता है। +- **Subgraph**: An open API that extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. Developers can build, deploy, and publish subgraphs to The Graph Network. Once it is indexed, the subgraph can be queried by anyone. -- **Indexer**: नेटवर्क प्रतिभागी जो इंडेक्सिंग नोड्स चलाते हैं ताकि ब्लॉकचेन से डेटा को इंडेक्स करें और GraphQL क्वेरीज़ को सर्व करें। +- **Indexer**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. -- **इंडेक्सर रेवेन्यू स्ट्रीम**: इंडेक्सर्स को जीआरटी में दो घटकों के साथ पुरस्कृत किया जाता है: क्वेरी शुल्क छूट और इंडेक्सिंग पुरस्कार। +- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. - 1. **प्रश्न शुल्क छूट**: नेटवर्क पर प्रश्न प्रस्तुत करने के लिए सबग्राफ उपभोक्ताओं से भुगतान। + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. - 2. **इंडेक्सिंग रिवार्ड्स**: वे पुरस्कार जो इंडेक्सर्स को सबग्राफ इंडेक्स करने के लिए मिलते हैं। इंडेक्सिंग पुरस्कार सालाना 3% जीआरटी के नए जारी करने के माध्यम से उत्पन्न होते हैं। + 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. -- **Indexer का Self-Stake**: वह मात्रा GRT जो Indexers विकेंद्रीकृत नेटवर्क में भाग लेने के लिए स्टेक करते हैं। न्यूनतम 100,000 GRT है, और कोई ऊपरी सीमा नहीं है। +- **Indexer's Self-Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. -- **Upgrade Indexer**: एक Indexer जिसे नेटवर्क पर अन्य Indexers द्वारा सेवा प्रदान नहीं किए जाने वाले subgraph प्रश्नों के लिए एक बैकअप के रूप में कार्य करने के लिए डिज़ाइन किया गया है। Upgrade Indexer अन्य Indexers के साथ प्रतिस्पर्धात्मक नहीं है। +- **Delegation Capacity**: The maximum amount of GRT an Indexer can accept from Delegators. Indexers can only accept up to 16x their Indexer Self-Stake, and additional delegation results in diluted rewards. For example, if an Indexer has a Self-Stake of 1M GRT, their delegation capacity is 16M. However, Indexers can increase their Delegation Capacity by increasing their Self-Stake. -- **Delegator**: नेटवर्क प्रतिभागी जो GRT के मालिक होते हैं और अपने GRT को Indexers को डेलीगेट करते हैं। इससे Indexers को नेटवर्क पर subgraphs में अपनी हिस्सेदारी बढ़ाने की अनुमति मिलती है। इसके बदले, Delegators को उन Indexing Rewards का एक हिस्सा मिलता है जो Indexers को subgraphs को प्रोसेस करने के लिए मिलते हैं। +- **Upgrade Indexer**: An Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. The upgrade Indexer is not competitive with other Indexers. -- **प्रत्यायोजन कर**: प्रतिनिधि द्वारा 0.5% शुल्क का भुगतान किया जाता है, जब वे अनुक्रमणकों को GRT प्रत्यायोजित करते हैं. शुल्क का भुगतान करने के लिए प्रयुक्त GRT जल गया है। +- **Delegator**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. 
In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **Curator**: नेटवर्क प्रतिभागी जो उच्च गुणवत्ता वाले subgraphs की पहचान करते हैं, और उनके लिए सिग्नल GRT प्राप्त करते हैं, जिसके बदले में उन्हें क्यूरेशन शेयर मिलते हैं। जब Indexers किसी subgraph पर क्वेरी शुल्क लेते हैं, तो उसका 10% उस subgraph के Curators को वितरित किया जाता है। GRT के सिग्नल की मात्रा और एक subgraph को इंडेक्सिंग करने वाले Indexers की संख्या के बीच एक सकारात्मक संबंध होता है। +- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. -- **क्यूरेशन टैक्स**: क्यूरेटर द्वारा सबग्राफ पर GRT का संकेत देने पर 1% शुल्क का भुगतान किया जाता है। शुल्क का भुगतान करने के लिए प्रयुक्त GRT जल गया है। +- **Curator**: Network participants that identify high-quality subgraphs, and signal GRT on them in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. There is a positive correlation between the amount of GRT signaled and the number of Indexers indexing a subgraph. -- **डेटा उपभोक्ता**: कोई भी एप्लिकेशन या उपयोगकर्ता जो एक subgraph का क्वेरी करता है। +- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. -- **सबग्राफ डेवलपर**: एक डेवलपर जो ग्राफ़ के विकेंद्रीकृत नेटवर्क के लिए एक सबग्राफ़ बनाता और तैनात करता है। +- **Data Consumer**: Any application or user that queries a subgraph. + +- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. - **Subgraph Manifest**: A YAML file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://github.com/graphprotocol/example-subgraph/blob/master/subgraph.yaml) is an example. @@ -42,40 +44,40 @@ title: शब्दकोष - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations can have different statuses: - 1. **सक्रिय**: एक आवंटन को तब सक्रिय माना जाता है जब इसे ऑन-चेन बनाया जाता है। इसे ओपनिंग आबंटन कहा जाता है, और यह नेटवर्क को इंगित करता है कि इंडेक्सर सक्रिय रूप से अनुक्रमण कर रहा है और किसी विशेष सबग्राफ के लिए प्रश्नों की सेवा कर रहा है। सक्रिय आबंटन उप-अनुच्छेद पर संकेत के अनुपात में अनुक्रमित पुरस्कार अर्जित करते हैं, और आवंटित जीआरटी की राशि। + 1. **Active**: An allocation is considered active when it is created onchain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **बंद**: एक Indexer एक दिए गए Subgraph पर अर्जित indexing पुरस्कारों का दावा कर सकता है, जब वह हाल का और वैध **Proof of Indexing (POI)** प्रस्तुत करता है। इसे आवंटन बंद करना कहा जाता है। एक आवंटन को बंद करने से पहले न्यूनतम एक युग के लिए खुला होना चाहिए। अधिकतम आवंटन अवधि 28 युग होती है। यदि एक Indexer 28 युगों के बाद भी आवंटन को खुला रखता है, तो इसे बासी आवंटन कहा जाता है। जब एक आवंटन **बंद** स्थिति में होता है, तो एक फिशरमैन अभी भी Indexer को झूठे डेटा की सेवा करने के लिए चुनौती देने के लिए एक विवाद खोल सकता है। + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. 
An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **सबग्राफ स्टूडियो**: सबग्राफ बनाने, लगाने और प्रकाशित करने के लिए एक शक्तिशाली डैप। +- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: The Graph Network में एक भूमिका, जिसे भागीदारों द्वारा निभाया जाता है, जो Indexers द्वारा प्रदान किए गए डेटा की सटीकता और अखंडता की निगरानी करते हैं। जब एक मछुआरा किसी क्वेरी प्रतिक्रिया या POI की पहचान करता है जिसे वह गलत मानता है, तो वह Indexer के खिलाफ एक विवाद शुरू कर सकता है। यदि विवाद का निर्णय मछुआरे के पक्ष में होता है, तो Indexer को 2.5% अपने स्वयं के स्टेक के नुकसान के रूप में दंडित किया जाता है। इस राशि का 50% मछुआरे को उनकी सतर्कता के लिए इनाम के रूप में दिया जाता है, और शेष 50% को परिसंचरण से हटा दिया जाता है (जलाया जाता है)। यह तंत्र मछुआरों को नेटवर्क की विश्वसनीयता बनाए रखने में मदद करने के लिए प्रोत्साहित करने के लिए डिज़ाइन किया गया है, यह सुनिश्चित करते हुए कि Indexers द्वारा प्रदान किए गए डेटा के लिए उन्हें जवाबदेह ठहराया जाए। +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed by losing 2.5% of their self-stake. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. - **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers अपनी स्वयं-स्टेक की GRT को गलत POI प्रदान करने या गलत डेटा सेवा देने के लिए स्लैश किया जा सकता है। स्लैशिंग प्रतिशत एक प्रोटोकॉल पैरामीटर है जिसे वर्तमान में एक Indexer's की स्वयं-स्टेक का 2.5% पर सेट किया गया है। स्लैश की गई GRT का 50% उस Fisherman को जाता है जिसने गलत डेटा या गलत POI का विवाद किया। बाकी 50% जलाया जाता है। +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **इंडेक्सिंग रिवार्ड्स**: वे पुरस्कार जो इंडेक्सर्स को सबग्राफ इंडेक्स करने के लिए मिलते हैं। इंडेक्सिंग पुरस्कार जीआरटी में वितरित किए जाते हैं। +- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. -- **प्रतिनिधि पुरस्कार**: अनुक्रमणकों को GRT प्रत्यायोजित करने के लिए प्रतिनिधि को प्राप्त होने वाले पुरस्कार. प्रतिनिधि पुरस्कार जीआरटी में वितरित किए जाते हैं। +- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. 
-- **GRT**: ग्राफ़ का कार्य उपयोगिता टोकन। जीआरटी नेटवर्क प्रतिभागियों को नेटवर्क में योगदान करने के लिए आर्थिक प्रोत्साहन प्रदान करता है। +- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- ** Proof of Indexing (POI)**: जब एक इंडेक्सर अपनी आवंटन को बंद करता है और किसी दिए गए सबग्राफ़ पर अर्जित इंडेक्सिंग पुरस्कारों का दावा करना चाहता है, तो उसे एक मान्य और हाल का POI प्रदान करना होगा। मछुआरे द्वारा इंडेक्सर द्वारा प्रदान किए गए POI का विवाद किया जा सकता है। यदि विवाद मछुआरे के पक्ष में हल होता है, तो इसका परिणाम इंडेक्सर के लिए दंडस्वरूप होगा। +- **Proof of Indexing (POI)**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent POI. Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **ग्राफ़ नोड**: ग्राफ़ नोड वह घटक है जो सबग्राफ़ को इंडेक्स करता है और परिणामस्वरूप डेटा को GraphQL APIके माध्यम से क्वेरी के लिए उपलब्ध कराता है। इसलिए, यह Indexer स्टैक का केंद्रीय हिस्सा है, और ग्राफ़ नोड का सही संचालन एक सफल इंडेक्सर चलाने के लिए महत्वपूर्ण है। +- **Graph Node**: Graph Node is the component that indexes subgraphs and makes the resulting data available to query via a GraphQL API. As such it is central to the Indexer stack, and correct operation of Graph Node is crucial to running a successful Indexer. -- **Indexer एजेंट**: इंडेक्सर एजेंट इंडेक्सर स्टैक का हिस्सा है। यह नेटवर्क पर इंडेक्सर की इंटरैक्शन को सुगम बनाता है, जिसमें नेटवर्क पर पंजीकरण, अपने Graph Node(s) पर सबग्राफ़ तैनातियों का प्रबंधन, और आवंटनों का प्रबंधन शामिल है। +- **Indexer agent**: The Indexer agent is part of the Indexer stack. It facilitates the Indexer's interactions onchain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. -- **द ग्राफ़ क्लाइंट**: विकेंद्रीकृत तरीके से ग्राफ़कॉल-आधारित डैप बनाने के लिए एक लाइब्रेरी। +- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. -- **ग्राफ़ एक्सप्लोरर**: एक डैप जिसे नेटवर्क प्रतिभागियों के लिए सबग्राफ एक्सप्लोर करने और प्रोटोकॉल के साथ इंटरैक्ट करने के लिए डिज़ाइन किया गया है। +- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. -- **ग्राफ़ सीएलआई**: ग्राफ़ को बनाने और परिनियोजित करने के लिए एक कमांड लाइन इंटरफ़ेस टूल। +- **Graph CLI**: A command line interface tool for building and deploying to The Graph. -- **कूलडाउन अवधि**: वह समय जब तक कोई अनुक्रमणिका अपने प्रतिनिधिमंडल पैरामीटर को बदल नहीं सकता, तब तक वह फिर से ऐसा कर सकता है। +- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 ट्रांसफर टूल्स**: स्मार्ट कॉन्ट्रैक्ट और UI जो नेटवर्क प्रतिभागियों को Ethereum मेननेट से Arbitrum One में नेटवर्क से संबंधित संपत्तियों को स्थानांतरित करने में सक्षम बनाते हैं। नेटवर्क प्रतिभागी डेलिगेटेड GRT, subgraphs, क्यूरेशन शेयर और Indexer's self-stake को ट्रांसफर कर सकते हैं। +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self-stake. 
-- **Subgraph को अपडेट करना**: Subgraph के मैनिफेस्ट, स्कीमा, या मैपिंग्स में अपडेट के साथ एक नए subgraph's manifest को जारी करने की प्रक्रिया। +- **Updating a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. - **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). From 599aa81847b7ecb9fdc6929421ebd1d588840061 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:11 -0500 Subject: [PATCH 0456/1534] New translations assemblyscript-migration-guide.mdx (French) --- .../assemblyscript-migration-guide.mdx | 120 +++++++++--------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/website/src/pages/fr/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/fr/resources/release-notes/assemblyscript-migration-guide.mdx index 49e76d908653..768459fdb8d3 100644 --- a/website/src/pages/fr/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/fr/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,29 +2,29 @@ title: Guide de migration de l'AssemblyScript --- -Jusqu'à présent, les subgraphs utilisaient l'une des [premières versions d'AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Enfin, nous avons ajouté la prise en charge du [le plus récent disponible](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) ! 🎉 +Jusqu'à présent, les subgraphs utilisaient l'une des [premières versions d'AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Nous avons enfin ajouté la prise en charge de la [dernière version disponible](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) ! 🎉 Cela permettra aux développeurs de subgraph d'utiliser les nouvelles fonctionnalités du langage AS et de la bibliothèque standard. -Ce guide s'applique à toute personne utilisant `graph-cli`/`graph-ts` sous la version `0.22.0`. Si vous utilisez déjà une version supérieure (ou égale), vous utilisez déjà la version `0.19.10` d'AssemblyScript 🙂 +Ce guide s'applique à tous ceux qui utilisent `graph-cli`/`graph-ts` en dessous de la version `0.22.0`. Si vous êtes déjà à une version supérieure (ou égale) à celle-ci, vous avez déjà utilisé la version `0.19.10` d'AssemblyScript 🙂 -> Remarque : Depuis `0.24.0`, `graph-node` peut prendre en charge les deux versions, en fonction de la `apiVersion` spécifiée dans le manifeste du subgraph. +> Note : A partir de `0.24.0`, `graph-node` peut supporter les deux versions, en fonction de la `apiVersion` spécifiée dans le manifeste du subgraph. 
## Fonctionnalités ### Nouvelle fonctionnalité -- Les `TypedArray` peuvent désormais être construits à partir de `ArrayBuffer` en utilisant les [nouvelle méthode statique `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nouvelles fonctions de bibliothèque standard: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Les `TypedArray`s peuvent maintenant être construits à partir des `ArrayBuffer`s en utilisant la [nouvelle méthode statique `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- Nouvelles fonctions de la bibliothèque standard : `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` et `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) - Ajout de la prise en charge de x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) - Ajout de `StaticArray`, une variante de tableau plus efficace ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) - Ajout de `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implémentation de `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Ajout de la prise en charge des séparateurs dans les littéraux à virgule flottante ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Ajout du support pour les fonctions de première classe ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Ajout des fonctions intégrées : `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implementation de `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Ajout de la prise en charge des chaînes littérales de modèle ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Implémentation de l'argument `radix` sur `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Ajout de la prise en charge des séparateurs dans les nombres à virgule flottante ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Prise en charge des fonctions de première classe ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Ajouter des éléments intégrés suivants: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implémentation de `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Ajout de la prise en charge des modèles de chaînes de caractères ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) - Ajout de `encodeURI(Component)` et `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) - Ajout de `toString`, `toDateString` et `toTimeString` à `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) - Ajout de `toUTCString` pour `Date` 
([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) @@ -32,19 +32,19 @@ Ce guide s'applique à toute personne utilisant `graph-cli`/`graph-ts` sous la v ### Optimizations -- Les fonctions `Math` telles que `exp`, `exp2`, `log`, `log2` et `pow` ont été remplacées par des variantes plus rapides ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Légère optimisation de `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Les fonctions mathématiques telles que `exp`, `exp2`, `log`, `log2` et `pow` ont été remplacées par des variantes plus rapides ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Optimisation légère de `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) - Mise en cache de plus d'accès aux champs dans std Map et Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimiser pour des puissances de deux `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Optimisation pour les puissances de deux dans `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Autre -- Le type d'un littéral de tableau peut désormais être déduit de son contenu ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Stdlib mis à jour vers Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Le type d'un de tableau d'éléments peut maintenant être déduit de son contenu ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Mise à jour de la stdlib vers Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## Comment mettre à niveau ? -1. Modifiez vos mappages `apiVersion` dans `subgraph.yaml` en `0.0.6` : +1. Changez vos mappages `apiVersion` dans `subgraph.yaml` en `0.0.6` : ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. Mettez à jour le `graph-cli` que vous utilisez vers la version `dernière` en exécutant : +2. Mettez à jour le `graph-cli` que vous utilisez avec la version la plus récente en exécutant : ```bash # si vous l'avez installé globalement @@ -66,7 +66,7 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Faites de même pour `graph-ts`, mais au lieu de l'installer globalement, enregistrez-le dans vos dépendances principales : +3. 
Faites la même chose pour `graph-ts`, mais au lieu de l'installer globalement, sauvegardez-le dans vos dépendances principales : ```bash npm install --save @graphprotocol/graph-ts@latest @@ -110,7 +110,7 @@ Si vous ne savez pas lequel choisir, nous vous recommandons de toujours utiliser ### Ombrage variable -Avant de pouvoir faire de l'[observation de variables](https://en.wikipedia.org/wiki/Variable_shadowing) et un code comme celui-ci fonctionnerait : +Auparavant, vous pouviez faire un [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) et un code comme celui-ci fonctionnait : ```typescript let a = 10 @@ -141,12 +141,12 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -Pour résoudre, vous pouvez simplement remplacer l'instruction `if` par quelque chose comme ceci : +Pour résoudre ce problème, il suffit de modifier l'instruction `if` en quelque chose comme ceci : ```typescript if (!decimals) { - // ou bien + // ou if (decimals === null) { ``` @@ -155,16 +155,16 @@ La même chose s'applique si vous faites != au lieu de ==. ### Casting -Auparavant, la manière courante de faire du casting consistait simplement à utiliser le mot-clé `as`, comme ceci : +Auparavant, la façon la plus courante d'effectuer une conversion de type était d'utiliser le mot-clé `as`, comme ceci : ```typescript let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +let uint8Array = byteArray as Uint8Array // équivalent à : byteArray ``` Cependant, cela ne fonctionne que dans deux scénarios : -- Casting primitif (entre des types tels que `u8`, `i32`, `bool` ; par exemple : `let b : isize = 10 ; b as usize`); +- Conversion des types primitifs (entre des types tels que `u8`, `i32`, `bool` ; ex : `let b : isize = 10 ; b as usize`) ; - Upcasting sur l'héritage de classe (sous-classe → superclasse) Les Exemples: @@ -177,39 +177,39 @@ let c: usize = a + (b as usize) ``` ```typescript -//upcasting lors de l'héritage de classe +// conversion vers le type parent dans l'héritage des classes class Bytes extends Uint8Array {} let bytes = new Bytes(2) -// bytes // équivalent à : bytes as Uint8Array +// bytes // idem que: bytes as Uint8Array ``` -Il existe deux scénarios dans lesquels vous souhaiterez peut-être diffuser du contenu, mais l'utilisation de `as`/`var` **n'est pas sûre** : +Il y a deux cas de figure où l'on peut vouloir faire une conversion de type, mais l'utilisation de `as`/`var` **n'est pas sûre** : - Downcasting sur l'héritage de classe (superclasse → sous-classe) - Entre deux types qui partagent une superclasse ```typescript -//downcasting lors de l'héritage de classe +// conversion vers le type enfant dans l'héritage des classes class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -// uint8Array // plante à l'exécution :( +// uint8Array // crash l'exécution :( ``` ```typescript -// entre deux types qui partagent une superclasse +// entre deux types qui partagent la même superclasse class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} let bytes = new Bytes(2) -// bytes // plante à l'exécution :( +// bytes // crash à l'exécution :( ``` -Dans ces cas-là, vous pouvez utiliser la fonction `changetype` : +Dans ce cas, vous pouvez utiliser la fonction `changetype` : ```typescript -//downcasting lors de l'héritage de classe +// conversion vers le type enfant dans l'héritage des classes class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) 
@@ -217,7 +217,7 @@ changetype(uint8Array) // fonctionne :) ``` ```typescript -// entre deux types qui partagent une superclasse +// entre deux types qui partagent une même superclasse class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // fonctionne :) ``` -Si vous souhaitez simplement supprimer la nullité, vous pouvez continuer à utiliser l'opérateur `as` (ou `variable`), mais assurez-vous de savoir que la valeur ne peut pas être nulle. sinon ça va casser. +Si vous voulez juste supprimer la nullité, vous pouvez continuer à utiliser l'opérateur `as` (ou `variable`), mais assurez-vous que vous savez que la valeur ne peut pas être nulle, sinon cela causera un crash. ```typescript // supprimer la possibilité de valeur nulle (nullability) @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -Pour le cas de nullité, nous vous recommandons de jeter un œil à la [fonctionnalité de vérification de la nullité](https://www.assemblyscript.org/basics.html#nullability-checks), cela rendra votre code plus propre 🙂 +Pour le cas de la nullité, nous recommandons de jeter un coup d'œil à la [fonction de vérification de la nullité](https://www.assemblyscript.org/basics.html#nullability-checks), cela rendra votre code plus propre 🙂 Nous avons également ajouté quelques méthodes statiques supplémentaires dans certains types pour faciliter la diffusion, à savoir : @@ -249,7 +249,7 @@ Nous avons également ajouté quelques méthodes statiques supplémentaires dans ### Vérification de nullité avec accès à la propriété -Pour utiliser la [fonctionnalité de vérification de nullité](https://www.assemblyscript.org/basics.html#nullability-checks), vous pouvez utiliser soit les instructions `if`, soit l'opérateur ternaire (`?` et `:`) comme ce: +Pour utiliser la [fonction de vérification de la nullité](https://www.assemblyscript.org/basics.html#nullability-checks), vous pouvez utiliser les instructions `if` ou l'opérateur ternaire (`?` et `:`) comme suit : ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -Cependant, cela ne fonctionne que lorsque vous effectuez le `if` / ternaire sur une variable, pas sur un accès à une propriété, comme ceci : +Cependant, cela ne fonctionne que lorsque vous faites le `if` / ternaire sur une variable, et non sur l'accès à une propriété, comme ceci : ```typescript class Container { @@ -380,7 +380,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -Vous devrez vous assurer d'initialiser la valeur `total.amount`, car si vous essayez d'accéder à la dernière ligne pour la somme, elle plantera. Donc soit vous l'initialisez d'abord : +Vous devez vous assurer d'initialiser la valeur `total.amount`, car si vous essayez d'y accéder comme dans la dernière ligne pour la somme, cela va planter. 
Il faut donc d'abord l'initialiser : ```typescript let total = Total.load('latest') @@ -393,7 +393,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Ou vous pouvez simplement modifier votre schéma GraphQL pour ne pas utiliser de type nullable pour cette propriété, puis nous l'initialiserons à zéro à l'étape `codegen` 😉 +Ou vous pouvez simplement changer votre schéma GraphQL pour ne pas utiliser un type nullable pour cette propriété, puis nous l'initialiserons à zéro à l'étape `codegen` 😉 ```graphql type Total @entity { @@ -406,7 +406,7 @@ type Total @entity { let total = Total.load('latest') if (total === null) { - total = new Total('latest') // initialise déjà les propriétés non-nullables + total = new Total('latest') // initialise déjà les propriétés non-nullables } total.amount = total.amount + BigInt.fromI32(1) @@ -424,7 +424,7 @@ export class Something { } ``` -Le compilateur générera une erreur car vous devez soit ajouter un initialiseur pour les propriétés qui sont des classes, soit ajouter l'opérateur `!` : +Le compilateur génèrera une erreur car vous devez soit ajouter un initialisateur pour les propriétés qui sont des classes, soit ajouter l'opérateur `!` : ```typescript export class Something { @@ -450,7 +450,7 @@ export class Something { ### Initialisation du tableau -La classe `Array` accepte toujours un nombre pour initialiser la longueur de la liste, mais vous devez faire attention car des opérations comme `.push` augmenteront en fait la taille au lieu de l'ajouter au début. , Par exemple: +La classe `Array` accepte toujours un nombre pour initialiser la longueur de la liste, cependant vous devez faire attention car des opérations comme `.push` vont en fait augmenter la taille au lieu d'ajouter au début, par exemple : ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -464,12 +464,12 @@ En fonction des types que vous utilisez, par exemple les types nullables, et de ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -Pour réellement pousser au début, vous devez soit initialiser le `Array` avec une taille zéro, comme ceci : +Pour pousser au début, vous devez soit initialiser le `Tableau` avec une taille de zéro, comme ceci : ```typescript let arr = new Array(0) // [] -arr.push('quelque chose') // ["quelque chose"] +arr.push('something') // ["quelque chose"] ``` Ou vous devriez le muter via index : @@ -477,27 +477,27 @@ Ou vous devriez le muter via index : ```typescript let arr = new Array(5) // ["", "", "", "", ""] -arr[0] = 'quelque chose' // ["quelque chose", "", "", "", ""] +arr[0] = 'something' // ["quelque chose", "", "", "", ""] ``` ### Schéma GraphQL -Il ne s'agit pas d'une modification directe d'AssemblyScript, mais vous devrez peut-être mettre à jour votre fichier `schema.graphql`. +Il ne s'agit pas d'un changement direct d'AssemblyScript, mais vous devrez peut-être mettre à jour votre fichier `schema.graphql`. Vous ne pouvez désormais plus définir de champs dans vos types qui sont des listes non nullables. Si vous avez un schéma comme celui-ci : ```graphql type Something @entity { - id: Bytes! + id: Bytes! } type MyEntity @entity { - id: Bytes! - invalidField: [Something]! 
# n'est plus valide + id: Bytes! + invalidField: [Something]! # n'est plus valide } ``` -Vous devrez ajouter un `!` au membre de type List, comme ceci : +Vous devrez ajouter un `!` au membre du type List, comme ceci : ```graphql type Something @entity { @@ -510,14 +510,14 @@ type MyEntity @entity { } ``` -Cela a changé en raison des différences de nullité entre les versions d'AssemblyScript et est lié au fichier `src/generated/schema.ts` (chemin par défaut, vous avez peut-être modifié cela). +Cela a changé à cause des différences de nullité entre les versions d'AssemblyScript, et c'est lié au fichier `src/generated/schema.ts` (chemin par défaut, vous pouvez l'avoir changé). ### Autre -- Alignement de `Map#set` et `Set#add` avec la spécification, en retournant `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Les tableaux n'héritent plus d'ArrayBufferView, mais sont désormais distincts ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Les classes initialisées à partir de littéraux d'objet ne peuvent plus définir de constructeur ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Le résultat d'une opération binaire `**` est désormais le dénominateur commun entier si les deux opérandes sont des entiers. Auparavant, le résultat était un float comme si vous appeliez `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Convertir `NaN` en `false` lors de la conversion en `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Lors du décalage d'une petite valeur entière de type `i8`/`u8` ou `i16`/`u16`, seuls les 3 respectivement 4 les plus petits les bits significatifs de la valeur RHS affectent le résultat, de la même manière que le résultat d'un `i32.shl` n'est affecté que par les 5 bits les moins significatifs de la valeur RHS. Exemple : `someI8 << 8` produisait auparavant la valeur `0`, mais produit désormais `someI8` en raison du masquage du RHS comme `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Correction d'un bug des comparaisons de chaînes relationnelles lorsque les tailles diffèrent ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Alignement de `Map#set` et `Set#add` avec la spécification, retournant `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Les tableaux n'héritent plus de ArrayBufferView, mais sont désormais distincts ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Les classes initialisées à partir d'objets littéraux ne peuvent plus définir de constructeur ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Le résultat d'une opération binaire `**` est maintenant l'entier de dénominateur commun si les deux opérandes sont des entiers. 
Auparavant, le résultat était un flottant comme si l'on appelait `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Contraindre `NaN` à `false` lors d'une conversion de type vers `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- Lors du déplacement d'une petite valeur entière de type `i8`/`u8` ou `i16`/`u16`, seuls les 3 ou 4 bits les moins significatifs de la valeur RHS affectent le résultat, de manière analogue au résultat d'un `i32.shl` qui n'est affecté que par les 5 bits les moins significatifs de la valeur RHS. Exemple : `someI8 << 8` produisait auparavant la valeur `0`, mais produit maintenant `someI8` à cause du masquage de la valeur RHS comme `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Correction d'un bug dans les comparaisons de chaînes de caractères relationnelles lorsque les tailles sont différentes ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From eb36393b1d318ebe1ef282e4dbc4149197fdfc1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:12 -0500 Subject: [PATCH 0457/1534] New translations assemblyscript-migration-guide.mdx (Spanish) --- .../assemblyscript-migration-guide.mdx | 108 +++++++++--------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/website/src/pages/es/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/es/resources/release-notes/assemblyscript-migration-guide.mdx index bfc973f982dd..354d8c68a3e8 100644 --- a/website/src/pages/es/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/es/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: Guía de Migración de AssemblyScript --- -Hasta ahora, los subgrafos han utilizado una de las [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finalmente, hemos añadido soporte para la [el más nuevo disponible](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 Esto permitirá a los desarrolladores de subgrafos utilizar las nuevas características del lenguaje AS y la librería estándar. -Esta guía es aplicable para cualquiera que use `graph-cli`/`graph-ts` bajo la versión `0.22.0`. Si ya estás en una versión superior (o igual) a esa, has estado usando la versión `0.19.10` de AssemblyScript 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> Nota: A partir de `0.24.0`, `graph-node` puede soportar ambas versiones, dependiendo del `apiVersion` especificado en el manifiesto del subgrafo. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. 
## Características ### Nueva Funcionalidad -- `TypedArray`s ahora puede construirse desde `ArrayBuffer`s usando el [nuevo `wrap` método estático](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nuevas funciones de la biblioteca estándar: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Se agregó soporte para x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Se agregó `StaticArray`, una más eficiente variante de array ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Se agregó `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Se implementó el argumento `radix` en `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Se agregó soporte para los separadores en los literales de punto flotante ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Se agregó soporte para las funciones de primera clase ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Se agregaron builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Se implementó `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Se agregó soporte para las plantillas de strings literales ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Se agregó `encodeURI(Component)` y `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Se agregó `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Se agregó `toUTCString` para `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Se agregó `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: 
`i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Optimizaciones -- Funciones `Math` como `exp`, `exp2`, `log`, `log2` y `pow` fueron reemplazadas por variantes más rápidas ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Optimizar ligeramente `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Caché de más accesos a campos en std Map y Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimizar para potencias de dos en `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Otros -- El tipo de un de array literal ahora puede inferirse a partir de su contenido ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Actualizado stdlib a Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## ¿Cómo actualizar? -1. Cambiar tus mappings `apiVersion` en `subgraph.yaml` a `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. Actualiza la `graph-cli` que usas a la `última` versión: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # si lo tiene instalada de forma global @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Haz lo mismo con `graph-ts`, pero en lugar de instalarlo globalmente, guárdalo en tus dependencias principales: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. Sigue el resto de la guía para arreglar los cambios que rompen el lenguaje. -5. Ejecuta `codegen` y `deploy` nuevamente. +5. 
Run `codegen` and `deploy` again. ## Rompiendo los esquemas @@ -110,7 +110,7 @@ Si no estás seguro de cuál elegir, te recomendamos que utilices siempre la ver ### Variable Shadowing -Antes podías hacer [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) y un código como este funcionaría: +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript let a = 10 @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -Para solucionarlo puedes simplemente cambiar la declaración `if` por algo así: +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -155,7 +155,7 @@ Lo mismo ocurre si haces != en lugar de ==. ### Casting -La forma común de hacer el casting antes era simplemente usar la palabra clave `as`, de la siguiente forma: +The common way to do casting before was to just use the `as` keyword, like this: ```typescript let byteArray = new ByteArray(10) @@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: byteArray Sin embargo, esto solo funciona en dos casos: -- Casting de primitivas (entre tipos como `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Upcasting en la herencia de clases (subclase → superclase) Ejemplos: @@ -184,7 +184,7 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -Hay dos escenarios en los que puede querer cast, pero usando `as`/`var` **no es seguro**: +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: - Downcasting en la herencia de clases (superclase → subclase) - Entre dos tipos que comparten una superclase @@ -206,7 +206,7 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -Para esos casos, puedes usar la función `changetype`: +For those cases, you can use the `changetype` function: ```typescript // downcasting on class inheritance @@ -217,7 +217,7 @@ changetype(uint8Array) // works :) ``` ```typescript -// entre dos tipos que comparten un superclass +// between two types that share a superclass class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -Si solo quieres eliminar la anulabilidad, puedes seguir usando el `as` operador (o `variable`), pero asegúrate de que el valor no puede ser nulo, de lo contrario se romperá. +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
```typescript // eliminar anulabilidad @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -Para el caso de la anulabilidad se recomienda echar un vistazo al [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), hará que tu código sea más limpio 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 También hemos añadido algunos métodos estáticos en algunos tipos para facilitar el casting, son: @@ -249,7 +249,7 @@ También hemos añadido algunos métodos estáticos en algunos tipos para facili ### Comprobación de anulabilidad con acceso a la propiedad -Para usar el [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) puedes usar la declaración `if` o el operador ternario (`?` and `:`) asi: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -Sin embargo eso solo funciona cuando estás haciendo el `if` / ternario en una variable, no en un acceso a una propiedad, como este: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -Tendrás que asegurarte de inicializar el valor `total.amount`, porque si intentas acceder como en la última línea para la suma, se bloqueará. Así que o bien la inicializas primero: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. 
So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -O simplemente puedes cambiar tu esquema GraphQL para no usar un tipo anulable para esta propiedad, entonces la inicializaremos como cero en el paso `codegen` 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -El compilador dará un error porque tienes que añadir un inicializador para las propiedades que son clases, o añadir el operador `!`: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -451,7 +451,7 @@ export class Something { ### Inicialización de Array -La clase `Array` sigue aceptando un número para inicializar la longitud de la lista, sin embargo hay que tener cuidado porque operaciones como `.push` en realidad aumentarán el tamaño en lugar de añadirlo al principio, por ejemplo: +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -465,7 +465,7 @@ Dependiendo de los tipos que estés utilizando, por ejemplo los anulables, y de ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -Para realmente empujar al principio deberías o bien, inicializar el `Array` con tamaño cero, así: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] ### Esquema GraphQL -Esto no es un cambio directo de AssemblyScript, pero es posible que tengas que actualizar tu archivo `schema.graphql`. +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. Ahora ya no puedes definir campos en tus tipos que sean Listas No Anulables. Si tienes un esquema como este: @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -Tendrás que añadir un `!` al miembro del tipo Lista, así: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -Esto cambió debido a las diferencias de anulabilidad entre las versiones de AssemblyScript, y está relacionado con el archivo `src/generated/schema.ts` (ruta por defecto, puede que lo hayas cambiado). +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). 
### Otros -- Alineado `Map#set` y `Set#add` con el spec, devolviendo `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Las arrays ya no heredan de ArrayBufferView, sino que son distintas ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Las clases inicializadas a partir de objetos literales ya no pueden definir un constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- El resultado de una operación binaria `**` es ahora el entero denominador común si ambos operandos son enteros. Anteriormente, el resultado era un flotante como si se llamara a `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coerción `NaN` a `false` cuando casting a `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Al desplazar un valor entero pequeño de tipo `i8`/`u8` o `i16`/`u16`, sólo los 3 o 4 bits menos significativos del valor RHS afectan al resultado, de forma análoga al resultado de un `i32.shl` que sólo se ve afectado por los 5 bits menos significativos del valor RHS. Ejemplo: `someI8 << 8` previamente producía el valor `0`, pero ahora produce `someI8` debido a enmascarar el RHS como `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Corrección de errores en las comparaciones de strings relacionales cuando los tamaños difieren ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From c2a6b5ec74f42e28855ee39c91d467e581c7dcad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:13 -0500 Subject: [PATCH 0458/1534] New translations assemblyscript-migration-guide.mdx (Arabic) --- .../assemblyscript-migration-guide.mdx | 94 +++++++++---------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/website/src/pages/ar/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/ar/resources/release-notes/assemblyscript-migration-guide.mdx index 84e00f13b4e1..9fe263f2f8b2 100644 --- a/website/src/pages/ar/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/ar/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: دليل ترحيل AssemblyScript --- -حتى الآن ، كانت ال Subgraphs تستخدم أحد [ الإصدارات الأولى من AssemblyScript ](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). أخيرًا ، أضفنا الدعم لـ [ أحدث دعم متاح ](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 سيمكن ذلك لمطوري ال Subgraph من استخدام مميزات أحدث للغة AS والمكتبة القياسية. -ينطبق هذا الدليل على أي شخص يستخدم `graph-cli`/`graph-ts` ادنى من الإصدار `0.22.0`. إذا كنت تستخدم بالفعل إصدارًا أعلى من (أو مساويًا) لذلك ، فأنت بالفعل تستخدم الإصدار `0.19.10` من AssemblyScript 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> ملاحظة: اعتبارًا من `0.24.0` ، يمكن أن يدعم `grapg-node` كلا الإصدارين ، اعتمادًا على `apiVersion` المحدد في Subgraph manifest. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. 
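To give a concrete taste of what the newer compiler unlocks before diving into the list, here is a minimal sketch (values are made up) exercising a few of the additions enumerated below:

```typescript
export function stdlibTaste(): string {
  let nested = [[1, 2], [3, 4]] // array literal type inferred from its contents (v0.9.0)
  let flat = nested.flat() // Array#flat, added in v0.10.0
  let ticker = 'grt'.toUpperCase() // String#toUpperCase, added in v0.9.0
  return `ticker=${ticker}, count=${flat.length}` // template literal strings, added in v0.18.17
}
```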
## مميزات ### وظائف جديدة -- يمكن الآن إنشاء `TypedArray` من `ArrayBuffer` باستخدام [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) جديد -- دوال المكتبة القياسية الجديدة`String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- تمت إضافة دعم لـ x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- تمت إضافة `StaticArray` متغير مصفوفة أكثر كفاءة([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- تمت إضافة`Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- تم تنفيذ`radix` argument على `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- دعم إضافي للفواصل في floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- دعم إضافي لدوال الفئة الأولى ([ v0.14.0 ](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- إضافة البناء: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- تنفيذ `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- دعم إضافي لقوالب literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- أضف`encodeURI(Component)` و `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- أضافة `toString`, `toDateString` و `toTimeString` إلى `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- أضافة `toUTCString` ل `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- أضافة `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` 
([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### التحسينات -- دوال `Math` مثل `exp`, `exp2`, `log`, `log2` و `pow` تم استبدالها بمتغيرات أسرع ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1))أكثر تحسينا -- تخزين المزيد من الوصول للحقول في std Map و Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- قم بتحسين قدرات اثنين في `ipow32 / 64` ([ v0.18.2 ](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### آخر -- يمكن الآن استنتاج نوع array literal من محتوياتها([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- تم تحديث stdlib إلى Unicode 13.0.0([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## كيف تقوم بالترقية؟ -1. تغيير `apiVersion` Mappings الخاص بك في `subgraph.yaml` إلى `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. قم بتحديث `graph-cli` الذي تستخدمه إلى `latest` عن طريق تشغيل: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # if you have it globally installed @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. افعل الشيء نفسه مع `graph-ts` ، ولكن بدلاً من التثبيت بشكل عام ، احفظه في dependencies الرئيسية: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. اتبع بقية الدليل لاصلاح التغييرات الهامة في اللغة. -5. قم بتشغيل `codegen` و `deploy` مرة أخرى. +5. Run `codegen` and `deploy` again. 
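As a preview of what step 4 amounts to in practice, this sketch combines the two most common fixes covered in the next section — `loadDecimals` is a hypothetical stand-in for any call of yours that can return null:

```typescript
import { BigInt, ByteArray, Bytes } from '@graphprotocol/graph-ts'

// Hypothetical helper: anything that used to be compared with `== null`
function loadDecimals(): BigInt | null {
  return BigInt.fromI32(18)
}

export function patchedPatterns(): BigInt {
  let decimals = loadDecimals()
  if (decimals === null) {
    decimals = BigInt.fromI32(0) // explicit === null check instead of == null
  }
  // Downcasting between types that share a superclass now goes through changetype
  let bytes = changetype<Bytes>(new ByteArray(2))
  return decimals.plus(BigInt.fromI32(bytes.length))
}
```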
## تغييرات هامة @@ -110,7 +110,7 @@ maybeValue.aMethod() ### Variable Shadowing -قبل أن تتمكن من إجراء [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) وتعمل تعليمات برمجية مثل هذه: +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript let a = 10 @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -لحل المشكلة يمكنك ببساطة تغيير عبارة `if` إلى شيء مثل هذا: +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -155,7 +155,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i ### Casting -كانت الطريقة الشائعة لإجراء ال Casting من قبل هي استخدام كلمة `as` ، مثل هذا: +The common way to do casting before was to just use the `as` keyword, like this: ```typescript let byteArray = new ByteArray(10) @@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: byteArray لكن هذا لا يعمل إلا في سيناريوهين: -- Primitive casting (بين انواع مثل`u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Upcasting على وراثة الفئة (subclass → superclass) أمثلة: @@ -206,7 +206,7 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -في هذه الحالة يمكنك إستخدام دالة `changetype`: +For those cases, you can use the `changetype` function: ```typescript // downcasting on class inheritance @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -إذا كنت تريد فقط إزالة nullability ، فيمكنك الاستمرار في استخدام `as` (أو `variable`) ، ولكن تأكد من أنك تعرف أن القيمة لا يمكن أن تكون خالية ، وإلا فإنه سوف يتوقف. +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
```typescript // remove nullability @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -بالنسبة لحالة ال nullability ، نوصي بإلقاء نظرة على [ مميزة التحقق من nullability ](https://www.assemblyscript.org/basics.html#nullability-checks) ، ستجعل الكود أكثر وضوحا🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 Also we've added a few more static methods in some types to ease casting, they are: @@ -249,7 +249,7 @@ Also we've added a few more static methods in some types to ease casting, they a ### التحقق من Nullability مع الوصول الى الخاصية -لاستخدام [ مميزة التحقق من nullability ](https://www.assemblyscript.org/basics.html#nullability-checks) ، يمكنك استخدام عبارات `if` أو عامل التشغيل الثلاثي (`؟` و`:`) مثل هذا: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -ومع ذلك ، فإن هذا لا يعمل إلا عند تنفيذ `if` / ternary على متغير ، وليس على الوصول للخاصية ، مثل هذا: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -فستحتاج إلى التأكد من تهيئة قيمة `total.amount` ، لأنه إذا حاولت الوصول كما في السطر الأخير للمجموع ، فسوف يتعطل. لذلك إما أن تقوم بتهيئته أولاً: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -أو يمكنك فقط تغيير مخطط GraphQL الخاص بك بحيث لا تستخدم نوع nullable لهذه الخاصية ، ثم سنقوم بتهيئته على أنه صفر في الخطوة`codegen`😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -فإن المترجم سيخطئ لأنك ستحتاج إما لإضافة مُهيئ للخصائص والتي هي فئات (classes)، أو إضافة عامل التشغيل `!`: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -515,10 +515,10 @@ This changed because of nullability differences between AssemblyScript versions, ### آخر -- تم ضبط `Map#set` و`Set#add` مع المواصفات ، راجعا بـ `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- لم تعد المصفوفة ترث من ArrayBufferView ، لكنها أصبحت متميزة الآن ([ v0.10.0 ](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- الفئات المهيئة من كائن لم يعد بإمكانها تعريف باني (constructor) لها ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- نتيجة العملية الثنائية `**` هي الآن العدد الصحيح للمقام المشترك إذا كان كلا المعاملين عددا صحيحا. 
في السابق كانت النتيجة float كما لو كان استدعاء `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- إجبار`NaN` إلى `false` عندما ال casting إلى`bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- عند إزاحة قيمة عدد صحيح صغير من النوع `i8`/`u8` أو `i16`/`u16` ، فإن فقط الـ 3 على التوالي لـ 4 بتات الأقل أهمية من قيمة RHS تؤثر على النتيجة ، على غرار نتيجة `i32.shl` المتأثرة فقط بالـ 5 بتات الأقل أهمية من قيمة RHS.. مثال: `someI8 << 8` أنتج سابقًا القيمة `0` ، ولكنه ينتج الآن `SomeI8` نظرًا لإخفاء RHS كـ `8 & 7 = 0`(3 بت) ([ v0.17.0 ](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- إصلاح خلل مقارنات السلاسل العلائقية (relational string) عندما تختلف الأحجام ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From 10b6b7a0fab668693af7abe8ec1ebbda97316bd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:14 -0500 Subject: [PATCH 0459/1534] New translations assemblyscript-migration-guide.mdx (Czech) --- .../assemblyscript-migration-guide.mdx | 108 +++++++++--------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/website/src/pages/cs/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/cs/resources/release-notes/assemblyscript-migration-guide.mdx index d1b9eb00bc04..756873dd8fbb 100644 --- a/website/src/pages/cs/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/cs/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: Průvodce migrací AssemblyScript --- -Dosud se pro subgrafy používala jedna z [prvních verzí AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Konečně jsme přidali podporu pro [nejnovější dostupnou verzi](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 
🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 To umožní vývojářům podgrafů používat novější funkce jazyka AS a standardní knihovny. -Tato příručka platí pro všechny, kteří používají `graph-cli`/`graph-ts` pod verzí `0.22.0`. Pokud již máte vyšší (nebo stejnou) verzi, používáte již verzi `0.19.10` AssemblyScript 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> Poznámka: Od verze `0.24.0` může `graph-node` podporovat obě verze v závislosti na `apiVersion` uvedené v manifestu podgrafu. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. ## Funkce ### Nové funkce -- `TypedArray`s lze nyní sestavit z `ArrayBuffer`s pomocí [nové `wrap` statické metody](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nové funkce standardní knihovny: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`a `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Přidána podpora pro x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Přidána `StaticArray`, efektivnější varianta pole ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Přidáno `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implementován argument `radix` na `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Přidána podpora oddělovačů v literálech s plovoucí desetinnou čárkou ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Přidána podpora funkcí první třídy ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Přidání vestavěných: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implementovat `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Přidána podpora literálních řetězců šablon ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Přidat `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Přidat `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Přidat `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Přidat `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, 
`String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Optimalizace -- `Math` funkce jako `exp`, `exp2`, `log`, `log2` a `pow` byly nahrazeny rychlejšími variantami ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Mírná optimalizace `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Vyrovnávací paměť pro více přístupů k polím std Map a Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimalizace pro mocniny dvou v `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Jiný -- Typ literálu pole lze nyní odvodit z jeho obsahu ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Aktualizace stdlib na Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## Jak provést upgrade? -1. 
Změňte mapování `apiVersion` v `subgraph.yaml` na `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. Aktualizujte používaný `graph-cli` na `nejnovější` verzi spuštěním: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # if you have it globally installed @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Totéž proveďte pro `graph-ts`, ale místo globální instalace jej uložte do hlavních závislostí: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. Postupujte podle zbytku příručky a opravte změny, které narušují jazyk. -5. Znovu spusťte `codegen` a `deploy`. +5. Run `codegen` and `deploy` again. ## Prolomení změn @@ -110,7 +110,7 @@ Pokud si nejste jisti, kterou verzi zvolit, doporučujeme vždy použít bezpeč ### Proměnlivé stínování -Dříve jste mohli udělat [stínování proměnné](https://en.wikipedia.org/wiki/Variable_shadowing) a kód jako tento by fungoval: +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript let a = 10 @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -Pro vyřešení můžete jednoduše změnit příkaz `if` na něco takového: +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -155,7 +155,7 @@ Totéž platí, pokud místo == použijete !=. ### Casting -Dříve se běžně používalo klíčové slovo `jako`, například takto: +The common way to do casting before was to just use the `as` keyword, like this: ```typescript let byteArray = new ByteArray(10) @@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: byteArray To však funguje pouze ve dvou případech: -- Primitivní casting (mezi typy jako `u8`, `i32`, `bool`; např: `let b: isize = 10; b jako usize`); +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Upcasting na dědičnost tříd (podtřída → nadtřída) Příklady: @@ -184,7 +184,7 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -Existují dva scénáře, kdy můžete chtít provést obsazení, ale použití `jako`/`var` **není bezpečné**: +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: - Downcasting při dědění tříd (nadtřída → podtřída) - Mezi dvěma typy, které mají společnou nadtřídu @@ -206,7 +206,7 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -Pro tyto případy můžete použít funkci `changetype`: +For those cases, you can use the `changetype` function: ```typescript // downcasting on class inheritance @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -Pokud chcete pouze odstranit nullability, můžete nadále používat operátor `jako` (nebo `proměnná`), ale ujistěte se, že hodnota nemůže být nulová, jinak dojde k rozbití. +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
```typescript // remove nullability @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -Pro případ nullability doporučujeme podívat se na funkci [kontrola nullability](https://www.assemblyscript.org/basics.html#nullability-checks), díky ní bude váš kód čistší 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 Také jsme přidali několik dalších statických metod v některých typy abychom usnadnili odlévání, jsou to: @@ -249,7 +249,7 @@ Také jsme přidali několik dalších statických metod v některých typy abyc ### Kontrola nulovatelnosti s přístupem k vlastnostem -Chcete-li použít funkci [kontroly nulovatelnosti](https://www.assemblyscript.org/basics.html#nullability-checks), můžete použít buď příkazy `if`, nebo ternární operátor (`?` a `:`), například takto: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -To však funguje pouze tehdy, když provádíte `if` / ternár na proměnné, nikoli na přístupu k vlastnosti, jako je tento: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -Musíte se ujistit, že jste inicializovali hodnotu `total.amount`, protože pokud se pokusíte přistupovat jako v posledním řádku pro součet, dojde k pádu. Takže ji buď nejprve inicializujte: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Nebo můžete změnit své schéma GraphQL tak, aby nepoužívalo nulovatelný typ pro tuto vlastnost, pak ji inicializujeme jako nulu v kroku `codegen` 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -Překladač bude chybovat, protože buď musíte přidat inicializátor pro vlastnosti, které jsou třídami, nebo přidat operátor `!`: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -451,7 +451,7 @@ export class Something { ### Inicializace polí -Třída `Array` stále přijímá číslo pro inicializaci délky seznamu, ale měli byste si dát pozor, protože operace jako `.push` ve skutečnosti zvětší velikost, místo aby například přidávala na začátek: +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -462,10 +462,10 @@ arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( V závislosti na typech, které používáte, např. 
nulovatelných, a na způsobu přístupu k nim se můžete setkat s chybou běhu, jako je tato: ``` -ERRO Handler přeskočen z důvodu selhání provádění, chyba: Mapování přerušeno na ~lib/array.ts, řádek 110, sloupec 40, se zprávou: Typ prvku musí být nulovatelný, pokud je pole děravé wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -Chcete-li skutečně tlačit na začátku, měli byste buď inicializovat `Array` s velikostí nula, například takto: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] ### Schéma GraphQL -Nejedná se o přímou změnu AssemblyScript, ale možná budete muset aktualizovat soubor `schema.graphql`. +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. Nyní již nelze v typech definovat pole, která jsou nenulovatelnými seznamy. Pokud máte takovéto schéma: @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -Budete muset přidat `!` k členu typu List, například takto: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -To se změnilo kvůli rozdílům v nullability mezi verzemi AssemblyScript a souvisí to se souborem `src/generated/schema.ts` (výchozí cesta, možná jste ji změnili). +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). ### Jiný -- Zarovnání `Map#set` a `Set#add` se specifikací, vrácení `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Pole již nedědí od ArrayBufferView, ale jsou nyní samostatná ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Třídy inicializované z objektových literálů již nemohou definovat konstruktor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Výsledkem binární operace `**` je nyní celé číslo se společným jmenovatelem, pokud jsou oba operandy celá čísla. Dříve byl výsledkem float, jako kdybyste volali `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Vynucení `NaN` na `false` při převodu na `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Při posunu malé celočíselné hodnoty typu `i8`/`u8` nebo `i16`/`u16` ovlivňují výsledek pouze 3, resp. 4 nejméně významné bity hodnoty RHS, obdobně jako výsledek `i32.shl` ovlivňuje pouze 5 nejméně významných bitů hodnoty RHS. 
Příklad: `someI8 << 8` dříve dávalo hodnotu `0`, ale nyní dává `someI8` kvůli maskování RHS jako `8 & 7 = 0` (3 bity) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Oprava chyb při porovnávání relačních řetězců při rozdílných velikostech ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From f477ed7283f32e1bf73344178b2899cd366a5e2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:15 -0500 Subject: [PATCH 0460/1534] New translations assemblyscript-migration-guide.mdx (German) --- .../assemblyscript-migration-guide.mdx | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/website/src/pages/de/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/de/resources/release-notes/assemblyscript-migration-guide.mdx index fb1ad8beb382..d5ffa00d0e1f 100644 --- a/website/src/pages/de/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/de/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: AssemblyScript Migration Guide --- -Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Bis jetzt haben Subgraphen eine der [ersten Versionen von AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6) verwendet. Endlich haben wir Unterstützung für die [neueste verfügbare Version](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) hinzugefügt! 🎉 That will enable subgraph developers to use newer features of the AS language and standard library. -This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. 
If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 +Diese Anleitung gilt für alle, die `graph-cli`/`graph-ts` unter Version `0.22.0` verwenden. Wenn Sie bereits eine höhere (oder gleiche) Version als diese haben, haben Sie bereits Version `0.19.10` von AssemblyScript verwendet 🙂 -> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. +> Anmerkung: Ab `0.24.0` kann `graph-node` beide Versionen unterstützen, abhängig von der im Subgraph-Manifest angegebenen `apiVersion`. ## Features ### New functionality -- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray` kann nun aus `ArrayBuffer` mit Hilfe der [neuen statischen Methode `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) erstellt werden +- Neue Standard-Bibliotheksfunktionen: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`und `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Unterstützung für x instanceof GenericClass hinzugefügt ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- `StaticArray`, hinzugefügt, eine effizientere Array-Variante ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- 
`Array#flat` hinzugefügt ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- `radix`-Argument bei `Number#toString` implementiert ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Unterstützung für Trennzeichen in Fließkomma-Literalen hinzugefügt ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Unterstützung für Funktionen erster Klasse hinzugefügt ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Buildins hinzugefügt: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- `Array/TypedArray/String#at` implementiert ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Unterstützung für Template-Literal-Strings hinzugefügt ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Hinzufügen von `encodeURI(Component)` und `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Hinzufügen von `toString`, `toDateString` und `toTimeString` zu `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Hinzufügen von `toUTCString` für `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Hinzufügen von `nonnull/NonNullable` integrierten Typ ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Optimizations -- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math`-Funktionen wie `exp`, `exp2`, `log`, `log2` und `pow` wurden durch schnellere Varianten ersetzt ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Leicht optimierte `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Mehr Feldzugriffe in std Map und Set zwischengespeichert ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimieren für Zweierpotenzen in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Other -- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Der Typ eines Array-Literal kann nun aus seinem Inhalt abgeleitet werden ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- stdlib auf Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) aktualisiert ## How to upgrade? -1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: +1. Ändern Sie Ihre Mappings `apiVersion` in `subgraph.yaml` auf `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. Update the `graph-cli` you're using to the `latest` version by running: +2. 
Aktualisieren Sie die `graph-cli`, die Sie verwenden, auf die `latest` Version, indem Sie sie ausführen: ```bash # if you have it globally installed @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: +3. Machen Sie dasselbe für `graph-ts`, aber anstatt es global zu installieren, speichern Sie es in Ihren Hauptabhängigkeiten: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. Follow the rest of the guide to fix the language breaking changes. -5. Run `codegen` and `deploy` again. +5. Führen Sie `codegen` und `deploy` erneut aus. ## Breaking changes @@ -110,7 +110,7 @@ If you are unsure which to choose, we recommend always using the safe version. I ### Variable Shadowing -Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: +Früher konnte man [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) machen und Code wie dieser würde funktionieren: ```typescript let a = 10 @@ -141,12 +141,12 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -To solve you can simply change the `if` statement to something like this: +Zur Lösung des Problems können Sie die `if`-Anweisung einfach wie folgt ändern: ```typescript if (!decimals) { - // or + // oder if (decimals === null) { ``` @@ -155,7 +155,7 @@ The same applies if you're doing != instead of ==. ### Casting -The common way to do casting before was to just use the `as` keyword, like this: +Früher war es üblich, das Schlüsselwort `as` für das Casting zu verwenden, etwa so: ```typescript let byteArray = new ByteArray(10) @@ -164,10 +164,10 @@ let uint8Array = byteArray as Uint8Array // equivalent to: byteArray However this only works in two scenarios: -- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Primitives Casting (zwischen Typen wie `u8`, `i32`, `bool`; z. 
B.: `let b: isize = 10; b as usize`); - Upcasting on class inheritance (subclass → superclass) -Examples: +Beispiele: ```typescript // primitive casting @@ -177,55 +177,55 @@ let c: usize = a + (b as usize) ``` ```typescript -// upcasting on class inheritance +// upcasting bei Klassenvererbung class Bytes extends Uint8Array {} let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array +// bytes // gleich wie: bytes as Uint8Array ``` -There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: +Es gibt zwei Szenarien, in denen man casten möchte, aber die Verwendung von `as`/`var` **ist nicht sicher**: - Downcasting on class inheritance (superclass → subclass) - Between two types that share a superclass ```typescript -// downcasting on class inheritance +// Downcasting bei Klassenvererbung class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( +// uint8Array // bricht zur Laufzeit ab :( ``` ```typescript -// between two types that share a superclass +// zwischen zwei Typen, die sich eine Oberklasse teilen class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} let bytes = new Bytes(2) -// bytes // breaks in runtime :( +// bytes // bricht zur Laufzeit ab :( ``` -For those cases, you can use the `changetype` function: +Für diese Fälle können Sie die Funktion `changetype` verwenden: ```typescript -// downcasting on class inheritance +// Downcasting bei Klassenvererbung class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) +changetype(uint8Array) // funktioniert :) ``` ```typescript -// between two types that share a superclass +// zwischen zwei Typen, die sich eine Oberklasse teilen class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} let bytes = new Bytes(2) -changetype(bytes) // works :) +changetype(bytes) // funktioniert :) ``` -If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. +Wenn Sie nur die Nullbarkeit entfernen wollen, können Sie weiterhin den `as`-Operator (oder `variable`) verwenden, aber stellen Sie sicher, dass Sie wissen, dass der Wert nicht Null sein kann, sonst bricht es. 
```typescript // remove nullability @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 +Für den Fall der Nullbarkeit empfehlen wir, einen Blick auf die [Nullability-Check-Funktion] (https://www.assemblyscript.org/basics.html#nullability-checks) zu werfen, sie wird Ihren Code sauberer machen 🙂 Also we've added a few more static methods in some types to ease casting, they are: @@ -249,7 +249,7 @@ Also we've added a few more static methods in some types to ease casting, they a ### Nullability check with property access -To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: +Um die [Nullability-Check-Funktion] (https://www.assemblyscript.org/basics.html#nullability-checks) zu verwenden, können Sie entweder `if`-Anweisungen oder den ternären Operator (`?` und `:`) wie folgt verwenden: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: +Das funktioniert jedoch nur, wenn Sie das `if` / ternär auf eine Variable anwenden, nicht auf einen Eigenschaftszugriff, wie hier: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: +Sie müssen sicherstellen, dass Sie den Wert `total.amount` initialisieren, denn wenn Sie versuchen, wie in der letzten Zeile auf die Summe zuzugreifen, wird das Programm abstürzen. Sie müssen ihn also entweder zuerst initialisieren: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 +Oder Sie können einfach Ihr GraphQL-Schema so ändern, dass Sie keinen nullbaren Typ für diese Eigenschaft verwenden, dann werden wir sie im Schritt `codegen` mit Null initialisieren 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: +Der Compiler wird einen Fehler machen, weil Sie entweder einen Initialisierer für die Eigenschaften hinzufügen müssen, die Klassen sind, oder den Operator `!` hinzufügen müssen: ```typescript export class Something { @@ -451,7 +451,7 @@ export class Something { ### Array initialization -The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: +Die Klasse `Array` akzeptiert immer noch eine Zahl, um die Länge der Liste zu initialisieren, aber Sie sollten vorsichtig sein, weil Operationen wie `.push` die Größe tatsächlich erhöhen, anstatt z.B. 
zum Anfang hinzuzufügen: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -465,7 +465,7 @@ Depending on the types you're using, eg nullable ones, and how you're accessing ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -To actually push at the beginning you should either, initialize the `Array` with size zero, like this: +Um tatsächlich am Anfang zu pushen, sollte man entweder das `Array` mit der Größe Null initialisieren, wie hier: ```typescript let arr = new Array(0) // [] @@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] ### GraphQL schema -This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. +Dies ist keine direkte AssemblyScript-Änderung, aber Sie müssen möglicherweise Ihre Datei `schema.graphql` aktualisieren. Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -You'll have to add an `!` to the member of the List type, like this: +Sie müssen ein `!` an das Mitglied des Typs List hinzufügen, etwa so: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). +Dies hat sich aufgrund von Unterschieden in der Nullbarkeit zwischen AssemblyScript-Versionen geändert und hängt mit der Datei `src/generated/schema.ts` (Standardpfad, vielleicht haben Sie diesen geändert) zusammen. ### Other -- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- `Map#set` und `Set#add` wurden an die Spezifikation angepasst und geben `this` zurück ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays erben nicht mehr von ArrayBufferView, sondern sind jetzt eigenständig ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Klassen, die aus Objektliteralen initialisiert werden, können nicht mehr einen Konstruktor definieren ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Das Ergebnis einer `**`-Binäroperation ist jetzt die Ganzzahl im gemeinsamen Nenner, wenn beide Operanden Ganzzahlen sind. Zuvor war das Ergebnis eine Fließkommazahl, wie beim Aufruf von `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- `NaN` auf `false` verzerren, wenn nach ‚bool‘ gecastet wird ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- Beim Verschieben eines kleinen Integer-Wertes vom Typ `i8`/`u8` oder `i16`/`u16` beeinflussen nur die 3 bzw. 4 niedrigstwertigen Bits des RHS-Wertes das Ergebnis, analog zum Ergebnis einer `i32.shl`, das nur von den 5 niedrigstwertigen Bits des RHS-Wertes beeinflusst wird. Beispiel: `someI8 << 8` erzeugte zuvor den Wert `0`, erzeugt aber jetzt `someI8`, da die RHS als `8 & 7 = 0` (3 Bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) maskiert wird +- Fehlerbehebung bei relationalen String-Vergleichen bei unterschiedlichen Größen ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From 79b1cd1480ddde32cf33081947f1bd5da4facf3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:16 -0500 Subject: [PATCH 0461/1534] New translations assemblyscript-migration-guide.mdx (Italian) --- .../assemblyscript-migration-guide.mdx | 124 +++++++++--------- 1 file changed, 62 insertions(+), 62 deletions(-) diff --git a/website/src/pages/it/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/it/resources/release-notes/assemblyscript-migration-guide.mdx index b6bd7ecc38d2..3b4b10c8454e 100644 --- a/website/src/pages/it/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/it/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: Guida alla migrazione di AssemblyScript --- -Finora i subgraph utilizzavano una delle [prime versioni di AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finalmente abbiamo aggiunto il supporto per la [più recente disponibile](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 Ciò consentirà agli sviluppatori di subgraph di utilizzare le nuove caratteristiche del linguaggio AS e della libreria standard. 
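The newer standard library also rounds out `Date` and URI handling; a minimal sketch (the date and query string are arbitrary) using the additions listed further down:

```typescript
export function dateAndUriTaste(): string {
  let when = new Date(0) // Unix epoch, purely illustrative
  let stamp = when.toUTCString() // Date#toUTCString, added in v0.18.30
  return encodeURI('t=' + stamp) // encodeURI, added in v0.18.27
}
```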
-Questa guida si applica a chiunque utilizzi `graph-cli`/ `graph-ts` al di sotto della versione `0.22.0`. Se siete già a una versione superiore (o uguale) a questa, avete già utilizzato la versione `0.19.10` di AssemblyScript 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> Nota: A partire da `0.24.0`, `graph-node` può supportare entrambe le versioni, a seconda della `apiVersion` specificata nel manifest del subgraph. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. ## Caratteristiche ### Nuova funzionalità -- `TypedArray` possono ora essere costruiti da `ArrayBuffer` utilizzando il [nuovo metodo statico `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nuove funzioni di libreria standard: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` e `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Aggiunto il supporto per x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Aggiunto `StaticArray`, una variante di array più efficiente ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Aggiunto `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implementato l'argomento `radix` su `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Aggiunto il supporto per i separatori nei letterali in virgola mobile ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Aggiunto il supporto per le funzioni di prima classe ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Aggiunti i builtin: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implementati `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Aggiunto il supporto per le stringhe letterali dei template ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Aggiunto `encodeURI(Component)` e `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Aggiunto `toString`, `toDateString` e `toTimeString` a `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Aggiunto `toUTCString` per `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Aggiunto il tipo builtin `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass 
([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Ottimizzazioni -- Le funzioni `matematiche` come `exp`, `exp2`, `log`, `log2` e `pow` sono state sostituite da varianti più rapide ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Ottimizzato leggermente `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cache di più accessi ai campi in std Map e Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Ottimizzato per le potenze di due in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Altro -- Il tipo di un letterale di array può ora essere dedotto dal suo contenuto ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Aggiornato stdlib a Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## Come aggiornare? -1. Modificare le mappature `apiVersion` in `subgraph.yaml` a `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... 
@@ -56,24 +56,24 @@ dataSources:
 ...
 ```

-2. Aggiornare il `graph-cli` in uso alla versione `ultima` eseguendo:
+2. Update the `graph-cli` you're using to the `latest` version by running:

```bash
-# se è installato globalmente
npm install --global @graphprotocol/graph-cli@latest
# o nel proprio subgraph, se è una dipendenza di dev
npm install --save-dev @graphprotocol/graph-cli@latest
```

-3. Fare lo stesso per `graph-ts`, ma invece di installarlo globalmente, salvarlo nelle dipendenze principali:
+3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies:

```bash
npm install --save @graphprotocol/graph-ts@latest
```

4. Seguire il resto della guida per correggere le modifiche alla lingua.
-5. Eseguire di nuovo `codegen` e `deploy`.
+5. Run `codegen` and `deploy` again.

## Cambiamenti di rottura

@@ -110,7 +110,7 @@ Se non si è sicuri di quale scegliere, si consiglia di utilizzare sempre la ver

### Shadowing della variabile

-Prima si poteva fare lo [shadowing della variabile](https://en.wikipedia.org/wiki/Variable_shadowing) e il codice come questo funzionava:
+Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work:

```typescript
let a = 10
@@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i
 in src/mappings/file.ts(41,21)
```

-Per risolvere il problema è sufficiente modificare l'istruzione `if` in qualcosa di simile:
+To solve you can simply change the `if` statement to something like this:

```typescript
if (!decimals) {
@@ -155,7 +155,7 @@ Lo stesso vale se si fa != invece di ==.

### Casting

-Il modo più comune per effettuare il casting era quello di utilizzare la parola chiave `as`, come in questo caso:
+The common way to do casting before was to just use the `as` keyword, like this:

```typescript
let byteArray = new ByteArray(10)
@@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: <Uint8Array>byteArray

Tuttavia, questo funziona solo in due scenari:

-- Casting primitivo (tra tipi come `u8`, `i32`, `bool`; ad esempio: `let b: isize = 10; b as usize`);
+- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`);
- Upcasting sull'ereditarietà delle classi (subclasse → superclasse)

Esempi:

@@ -184,48 +184,48 @@ let bytes = new Bytes(2)
 // <Uint8Array>bytes // same as: bytes as Uint8Array
```

-Ci sono due scenari in cui potreste voler effettuare casting, ma l'uso `as`/`<T>var` **non è sicuro**:
+There are two scenarios where you may want to cast, but using `as`/`<T>var` **isn't safe**:

- Downcasting sull'ereditarietà delle classi (superclasse → subclasse)
- Tra due tipi che condividono una superclasse

```typescript
-// downcasting sull'ereditarietà delle classi
+// downcasting on class inheritance
class Bytes extends Uint8Array {}

let uint8Array = new Uint8Array(2)
-// <Bytes>uint8Array // si interrompe in fase di esecuzione :(
+// <Bytes>uint8Array // breaks in runtime :(
```

```typescript
-// tra due tipi che condividono una superclasse
+// between two types that share a superclass
class Bytes extends Uint8Array {}
class ByteArray extends Uint8Array {}

let bytes = new Bytes(2)
-// <ByteArray>bytes // si interrompe in fase di esecuzione :(
+// <ByteArray>bytes // breaks in runtime :(
```

-In questi casi, si può usare la funzione `changetype<T>`:
+For those cases, you can use the `changetype<T>` function:

```typescript
-// downcasting sull'ereditarietà delle classi
+// downcasting on class inheritance
class Bytes extends Uint8Array {}

let uint8Array = new Uint8Array(2)
-changetype<Bytes>(uint8Array) // funziona :)
+changetype<Bytes>(uint8Array) // works :)
```

```typescript
-// tra due tipi che condividono una superclasse
+// between two types that share a superclass
class Bytes extends Uint8Array {}
class ByteArray extends Uint8Array {}

let bytes = new Bytes(2)
-changetype<ByteArray>(bytes) // funziona :)
+changetype<ByteArray>(bytes) // works :)
```

-Se si vuole solo rimuovere la nullità, si può continuare a usare l'operatore `as` (oppure `<T>variabile`), ma assicurarsi di sapere che il valore non può essere nullo, altrimenti si interromperà.
+If you just want to remove nullability, you can keep using the `as` operator (or `<T>variable`), but make sure you know that value can't be null, otherwise it will break.

```typescript
// rimuovere la nullità
@@ -238,7 +238,7 @@ if (previousBalance != null) {
 let newBalance = new AccountBalance(balanceId)
```

-Per il caso della nullità si consiglia di dare un'occhiata alla [funzione verifica della nullità](https://www.assemblyscript.org/basics.html#nullability-checks), che renderà il codice più pulito 🙂
+For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂

Inoltre abbiamo aggiunto alcuni metodi statici in alcuni tipi per facilitare il casting, che sono:

@@ -249,7 +249,7 @@ Inoltre abbiamo aggiunto alcuni metodi statici in alcuni tipi per facilitare il

### Verifica della nullità con accesso alle proprietà

-Per utilizzare la [di funzione controllo della nullità](https://www.assemblyscript.org/basics.html#nullability-checks) si possono usare le istruzioni `if` oppure l'operatore ternario (`?` e `:`) come questo:
+To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this:

```typescript
let something: string | null = 'data'
@@ -267,7 +267,7 @@ if (something) {
 }
```

-Tuttavia, questo funziona solo quando si esegue il `if` / ternario su una variabile, non sull'accesso a una proprietà, come in questo caso:
+However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this:

```typescript
class Container {
@@ -381,7 +381,7 @@ if (total === null) {
 total.amount = total.amount + BigInt.fromI32(1)
```

-È necessario assicurarsi di inizializzare il valore `total.amount`, perché se si tenta di accedervi come nell'ultima riga per la somma, il programma si blocca. Quindi bisogna inizializzarlo prima:
+You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash.
So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Oppure si può semplicemente modificare lo schema GraphQL per non utilizzare un tipo nullable per questa proprietà, quindi la inizializzeremo come zero nel passaggio `codegen` 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -Il compilatore darà un errore perché è necessario aggiungere un initializer per le proprietà che sono classi, oppure aggiungere l'operatore `!`: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -451,7 +451,7 @@ export class Something { ### Inizializzazione del Array -La classe `Array` accetta ancora un numero per inizializzare la lunghezza dell'elenco, ma bisogna fare attenzione perché operazioni come `.push` aumentano effettivamente la dimensione invece di aggiungere all'inizio, ad esempio: +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -465,7 +465,7 @@ A seconda dei tipi utilizzati, ad esempio quelli nullable, e del modo in cui vi ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -Per spingere effettivamente all'inizio, si dovrebbe inizializzare l'`Array` con dimensione zero, in questo modo: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] ### Schema GraphQL -Non si tratta di una modifica diretta di AssemblyScript, ma potrebbe essere necessario aggiornare il file `schema.graphql`. +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. Ora non è più possibile definire campi nei tipi che sono elenchi non nulli. Se si ha uno schema come questo: @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -Si dovrà aggiungere un `!` al membro del tipo List, in questo modo: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -Questo è cambiato a causa delle differenze di nullabilità tra le versioni di AssemblyScript ed è legato al file `src/generated/schema.ts` (percorso predefinito, potrebbe essere stato modificato). +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). 
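As a quick illustration of what the stricter list typing means in mapping code — a sketch only: the `ListHolder` entity, its `items` field, the handler, and the import path are hypothetical assumptions about typical codegen output, not something this guide defines:

```typescript
// Illustrative example — entity and field names are made up.
// Generated relation lists are stored as arrays of entity IDs (strings).
import { ListHolder } from '../generated/schema' // default codegen path, per above

export function exampleHandler(id: string, itemId: string): void {
  let holder = new ListHolder(id)

  // With `items: [Item!]!`, neither the list nor its elements may be null,
  // so assemble the complete array of IDs before assigning it:
  let items = new Array<string>(0)
  items.push(itemId)
  holder.items = items
  holder.save()
}
```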
### Altro -- Allinea `Map#set` e `Set#add` con le specifiche, restituendo `questo` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Gli Array non ereditano più da ArrayBufferView, ma sono ora distinti ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Le classi inizializzate a partire da letterali di oggetti non possono più definire un costruttore ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Il risultato di un'operazione binaria `**` è ora l'intero a denominatore comune se entrambi gli operandi sono interi. In precedenza, il risultato era un float, come se si chiamasse `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coerenzia `NaN` a `false` quando viene lanciato a `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Quando si sposta un piccolo valore intero di tipo `i8`/`u8` oppure `i16`/`u16`, solo i 3 o 4 bit meno significativi del valore RHS influiscono sul risultato, analogamente al risultato di un `i32.shl` che viene influenzato solo dai 5 bit meno significativi del valore RHS. Esempio: `someI8 << 8` prima produceva il valore `0`, ma ora produce `someI8` a causa del mascheramento del RHS come `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Correzione del bug dei confronti tra stringhe relazionali quando le dimensioni sono diverse ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From 9faca8595efcf6caf767da6495551a8c6f87177b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:17 -0500 Subject: [PATCH 0462/1534] New translations assemblyscript-migration-guide.mdx (Japanese) --- .../assemblyscript-migration-guide.mdx | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ja/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/ja/resources/release-notes/assemblyscript-migration-guide.mdx index 766fbb6c80a3..88e5aea91168 100644 --- a/website/src/pages/ja/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/ja/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,13 +2,13 @@ title: AssemblyScript マイグレーションガイド --- -これまでサブグラフは、[AssemblyScript の最初のバージョン](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6)を使用していました。 ついに[最新のバージョン](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10)(v0.19.10) のサポートを追加しました! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 これにより、サブグラフの開発者は、AS 言語と標準ライブラリの新しい機能を使用できるようになります。 -このガイドは、バージョン`0.22.0`以下の`graph-cli`/`graph-ts` をお使いの方に適用されます。 もしあなたがすでにそれ以上のバージョンにいるなら、あなたはすでに AssemblyScript のバージョン`0.19.10` を使っています。 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> 注:`0.24.0`以降、`graph-node`はサブグラフマニフェストで指定された`apiVersion`に応じて、両方のバージョンをサポートしています。 +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. ## 特徴 @@ -44,7 +44,7 @@ title: AssemblyScript マイグレーションガイド ## アップグレードの方法 -1. `subgraph.yaml`のマッピングの`apiVersion`を`0.0.6`に変更してください。 +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. 使用している`graph-cli`を`最新版`に更新するには、次のように実行します。 +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # if you have it globally installed @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. `graph-ts`についても同様ですが、グローバルにインストールするのではなく、メインの依存関係に保存します。 +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. ガイドの残りの部分に従って、言語の変更を修正します。 -5. `codegen`を実行し、再度`deploy`します。 +5. Run `codegen` and `deploy` again. 
## 変更点

@@ -110,7 +110,7 @@ maybeValue.aMethod()

### 変数シャドウイング

-以前は、[変数のシャドウイング](https://en.wikipedia.org/wiki/Variable_shadowing)を行うことができ、次のようなコードが動作していました。
+Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work:

```typescript
let a = 10
@@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i
 in src/mappings/file.ts(41,21)
```

-解決するには、 `if` 文を以下のように変更するだけです。
+To solve you can simply change the `if` statement to something like this:

```typescript
if (!decimals) {
@@ -155,7 +155,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i

### 鋳造

-以前の一般的なキャストの方法は、次のように`as`キーワードを使うだけでした。
+The common way to do casting before was to just use the `as` keyword, like this:

```typescript
let byteArray = new ByteArray(10)
@@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: <Uint8Array>byteArray

しかし、これは 2 つのシナリオでしか機能しません。

-- プリミティブなキャスト(between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`);
+- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`);
- クラス継承のアップキャスティング(サブクラス → スーパークラス)

例

@@ -184,7 +184,7 @@ let bytes = new Bytes(2)
 // <Uint8Array>bytes // same as: bytes as Uint8Array
```

-キャストしたくても、`as`/`<T>var`を使うと**安全ではない**というシナリオが 2 つあります。
+There are two scenarios where you may want to cast, but using `as`/`<T>var` **isn't safe**:

- クラス継承のダウンキャスト(スーパークラス → サブクラス)
- スーパークラスを共有する 2 つの型の間

@@ -206,7 +206,7 @@ let bytes = new Bytes(2)
 // <ByteArray>bytes // breaks in runtime :(
```

-このような場合には、`changetype<T>`関数を使用します。
+For those cases, you can use the `changetype<T>` function:

```typescript
// downcasting on class inheritance
@@ -225,7 +225,7 @@ let bytes = new Bytes(2)
changetype<ByteArray>(bytes) // works :)
```

-単に null 性を除去したいだけなら、`as` オペレーター(or `<T>variable`)を使い続けることができますが、値が null ではないことを確認しておかないと壊れてしまいます。
+If you just want to remove nullability, you can keep using the `as` operator (or `<T>variable`), but make sure you know that value can't be null, otherwise it will break.
```typescript // remove nullability @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -Nullability については、[nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks)を利用することをお勧めします。それはあなたのコードをよりきれいにします🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 また、キャストを容易にするために、いくつかの型にスタティックメソッドを追加しました。 @@ -249,7 +249,7 @@ Nullability については、[nullability check feature](https://www.assemblysc ### プロパティアクセスによる Nullability チェック -[nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks)を使用するには、次のように`if`文や三項演算子(`?` and `:`) を使用します。 +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -しかし、これは、以下のように、プロパティのアクセスではなく、変数に対して`if`/ternary を行っている場合にのみ機能します。 +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -`total.amount`の値を確実に初期化する必要があります。なぜなら、最後の行の sum のようにアクセスしようとすると、クラッシュしてしまうからです。 そのため、最初に初期化する必要があります。 +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -あるいは、このプロパティに nullable 型を使用しないように GraphQL スキーマを変更することもできます。そうすれば、`コード生成`の段階でゼロとして初期化されます。😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -コンパイラがエラーになるのは、クラスであるプロパティにイニシャライザを追加するか、`!` オペレーターを追加する必要があるからです。 +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -451,7 +451,7 @@ export class Something { ### 配列の初期化 -`Array`クラスは、リストの長さを初期化するための数値を依然として受け取ります。しかし、例えば`.push`のような操作は、先頭に追加するのではなく、実際にサイズを大きくするので、注意が必要です。 +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -465,7 +465,7 @@ arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -実際に最初にプッシュするには、以下のように、サイズゼロの `Array`を初期化する必要があります: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] 
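To recap the behavior above in one place, here is a small sketch (illustrative only; the element type parameter is spelled out as `Array<string>` for clarity) contrasting the holey-array pitfall with the two safe patterns:

```typescript
// Pitfall: pre-sizing and then pushing grows the array past its holes.
let holey = new Array<string>(5) // ["", "", "", "", ""]
holey.push('x') // length is now 6; the five original slots stay empty

// Safe pattern 1: start from size zero and push.
let grown = new Array<string>(0) // []
grown.push('x') // ["x"]

// Safe pattern 2: keep the pre-sized array but assign every index.
let indexed = new Array<string>(5)
for (let i = 0; i < indexed.length; i++) {
  indexed[i] = 'x' // every slot initialized, no holes remain
}
```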
### GraphQLスキーマ -これは直接のAssemblyScriptの変更ではありませんが、`schema.graphql` ファイルを更新する必要があるかもしれません。 +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. この変更により、Non-Nullable Listのフィールドを型に定義することができなくなりました。仮に、以下のようなスキーマがあった場合: @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -List タイプのメンバーには、以下のように`!` を付ける必要があります: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -511,7 +511,7 @@ type MyEntity @entity { } ``` -これはAssemblyScriptのバージョンによるNullabilityの違いで変わったもので、`src/generated/schema.ts`ファイル(デフォルトパス、変更されているかもしれません)に関連しています。 +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). ### その他 @@ -520,5 +520,5 @@ type MyEntity @entity { - Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) - Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- タイプ `i8`/`u8` または `i16`/`u16` の小さな整数値をシフトする場合、最小の 3 つ、それぞれ 4 つだけRHS 値の上位 5 ビットのみが影響を受ける `i32.shl` の結果と同様に、RHS 値の有効ビットが結果に影響します。例: `someI8 << 8` は以前は値 `0` を生成していましたが、RHS を `8 & 7 = 0` としてマスクするため、`someI8` を生成するようになりました。(3 ビット) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) - Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From e902fc9184d0b5d440d4ee63ef6e3c1b3423f264 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:20 -0500 Subject: [PATCH 0463/1534] New translations assemblyscript-migration-guide.mdx (Portuguese) --- .../assemblyscript-migration-guide.mdx | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/website/src/pages/pt/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/pt/resources/release-notes/assemblyscript-migration-guide.mdx index ce410b9ed255..165055c46822 100644 --- a/website/src/pages/pt/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/pt/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: Guia de Migração do AssemblyScript --- -Até agora, os subgraphs têm usado uma das [primeiras versões do AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finalmente, adicionamos apoio à versão [mais recente disponível](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v.0.19.10)! 
🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 Isto permitirá que os programadores de subgraph usem recursos mais novos da linguagem AS e da sua biblioteca normal. -Este guia se aplica a quem usar o `graph-cli`/`graph-ts` antes da versão `0.22.0`. Se já está numa versão maior (ou igual) àquela, já está a usar a versão `0.19.10` do AssemblyScript 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> Nota: Desde a versão `0.24.0`, o `graph-node` pode apoiar ambas as versões, dependente da `apiVersion` especificada no manifest do subgraph. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. ## Recursos ### Novas funcionalidades -- `TypedArray`s podem ser construídos de `ArrayBuffer`s com o novo método estático [ `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Novas funções de biblioteca normais: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` e `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Suporte para x GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- `StaticArray`, uma variante de arranjo mais eficiente ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implementado o argumento `radix` no `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Suporte para separadores em literais de ponto flutuante ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Suporte para funções de primeira classe ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Embutidos: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Suporte para strings literais de modelos ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- `encodeURI(Component)` e `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- `toString`, `toDateString` e `toTimeString` ao `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- `toUTCString` para a `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Tipo embutido `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, 
`String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Otimizações -- Funções `Math` como `exp`, `exp2`, `log`, `log2` e `pow` foram substituídas por variantes mais rápidas ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Otimizado levemente o `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cacheing de mais acessos de campos em Map e Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Otimização para poderes de dois no `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Outros -- O tipo de um literal de arranjos agora pode ser inferido dos seus conteúdos ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- stdlib atualizado ao Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## Como atualizar? -1. 
Mude os seus mapeamentos de `apiVersion` no `subgraph.yaml` para `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. Atualize o `graph-cli` que usa à versão mais recente (`latest`) com: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # caso o tenha instalado globalmente @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Faça o mesmo para o `graph-ts`, mas em vez de instalar globalmente, salve-o nas suas dependências principais: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. Siga o resto do guia para consertar as mudanças frágeis na linguagem. -5. Execute `codegen` e `deploy` novamente. +5. Run `codegen` and `deploy` again. ## Breaking changes (mudanças frágeis) @@ -106,11 +106,11 @@ let maybeValue = load()! // breaks in runtime if value is null maybeValue.aMethod() ``` -Se não tiver certeza de qual escolher, é sempre bom usar a versão segura. Se o valor não existir, pode fazer uma declaração `if` precoce com um retorno no seu handler de subgraph. +Se não tiver certeza de qual escolher, é sempre bom usar a versão segura. Se o valor não existir, pode fazer uma declaração if precoce com um retorno no seu handler de subgraph. ### Sombreamento Varíavel -Antes, ao fazer [sombreamentos variáveis](https://en.wikipedia.org/wiki/Variable_shadowing), códigos assim funcionavam bem: +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript let a = 10 @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -Para resolver isto, basta mudar a declaração `if` para algo assim: +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -155,7 +155,7 @@ O mesmo acontece se fizer o != em vez de ==. 
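A self-contained sketch of that fix (the `nextValue` helper and its nullable parameter are assumptions for illustration; only the `!value` check itself is the pattern shown above):

```typescript
import { BigInt } from '@graphprotocol/graph-ts'

// Hypothetical helper demonstrating the stricter null comparison rules.
function nextValue(value: BigInt | null): BigInt {
  // `value == null` no longer compiles for a nullable BigInt,
  // so use the truthiness check instead:
  if (!value) {
    return BigInt.fromI32(0) // fall back when the value is missing
  }
  // In this branch the compiler treats `value` as non-null.
  return value.plus(BigInt.fromI32(1))
}
```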
### Casting (Conversão de tipos)

-Antigamente, casting era normalmente feito com a palavra-chave `as`, assim:
+The common way to do casting before was to just use the `as` keyword, like this:

```typescript
let byteArray = new ByteArray(10)
@@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: <Uint8Array>byteArray

Porém, isto só funciona em dois casos:

-- Casting primitivo (entre tipos como `u8`, `i32`, `bool`; por ex.: `let b: isize = 10; b as usize`);
+- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`);
- Upcasting em herança de classe (subclass → superclass)

Exemplos:

```typescript
// primitive casting
let a: usize = 10
let b: isize = 5
let c: usize = a + (b as usize)
```

```typescript
-// upcasting em herança de classe
+// upcasting on class inheritance
class Bytes extends Uint8Array {}

let bytes = new Bytes(2)
-// <Uint8Array>bytes // mesmo que: bytes como Uint8Array
+// <Uint8Array>bytes // same as: bytes as Uint8Array
```

-Há dois cenários onde casting é possível, mas usar `as`/`<T>var` **não é seguro**:
+There are two scenarios where you may want to cast, but using `as`/`<T>var` **isn't safe**:

- Downcasting em herança de classe (superclass → subclass)
- Entre dois tipos que compartilham uma superclasse

```typescript
-// downcasting em herança de classe
+// downcasting on class inheritance
class Bytes extends Uint8Array {}

let uint8Array = new Uint8Array(2)
-// <Bytes>uint8Array // quebra no runtime :(
+// <Bytes>uint8Array // breaks in runtime :(
```

```typescript
-// entre dois tipos que compartilham uma superclasse
+// between two types that share a superclass
class Bytes extends Uint8Array {}
class ByteArray extends Uint8Array {}

let bytes = new Bytes(2)
-// <ByteArray>bytes // quebra no runtime :(
+// <ByteArray>bytes // breaks in runtime :(
```

-Nestes casos, vale usar a função `changetype<T>`:
+For those cases, you can use the `changetype<T>` function:

```typescript
-// downcasting em herança de classe
+// downcasting on class inheritance
class Bytes extends Uint8Array {}

let uint8Array = new Uint8Array(2)
-changetype<Bytes>(uint8Array) // funciona :)
+changetype<Bytes>(uint8Array) // works :)
```

```typescript
-// entre dois tipos que compartilham uma superclasse
+// between two types that share a superclass
class Bytes extends Uint8Array {}
class ByteArray extends Uint8Array {}

let bytes = new Bytes(2)
-changetype<ByteArray>(bytes) // funciona :)
+changetype<ByteArray>(bytes) // works :)
```

-Se só quiser tirar a anulabilidade, pode continuar a usar o operador `as` (ou `<T>variable`), mas tenha ciência de que sabe que o valor não pode ser nulo, ou ele falhará.
+If you just want to remove nullability, you can keep using the `as` operator (or `<T>variable`), but make sure you know that value can't be null, otherwise it will break.
```typescript // remove anulabilidade @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -Para o caso de anulabilidade, é bom dar uma olhada no [recurso de verificação de anulabilidade](https://www.assemblyscript.org/basics.html#nullability-checks), pois ele deixará o seu código mais limpinho 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 Também adicionamos alguns métodos estáticos em alguns tipos para facilitar o casting, sendo: @@ -249,7 +249,7 @@ Também adicionamos alguns métodos estáticos em alguns tipos para facilitar o ### Checagem de anulabilidade com acesso à propriedade -Para usar a [checagem de anulabilidade](https://www.assemblyscript.org/basics.html#nullability-checks), dá para usar declarações `if` ou o operador ternário (`?` e `:`) assim: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -Mas isto só funciona ao fazer o ternário `if` / numa variável, e não num acesso de propriedade, assim: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -Inicialize o valor `total.amount`, porque se tentar acessar como na última linha para a soma, ele irá travar. Então — ou inicializas primeiro: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. 
So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Ou pode simplesmente mudar o seu schema GraphQL para que não use um tipo anulável para esta propriedade, e o inicialize como zero no passo `codegen` 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -O compilador dará em erro, porque precisa adicionar um iniciador às propriedades que são classes, ou adicionar o operador `!`: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -451,7 +451,7 @@ export class Something { ### Iniciação de arranjo -A classe `Array` (arranjo) ainda aceita um número para iniciar o comprimento da lista, mas tome cuidado — porque operações como `.push` aumentarão o tamanho em vez de adicionar ao começo, por exemplo: +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -465,7 +465,7 @@ Dependendo dos tipos que usa, por ex., anuláveis, e como os acessa, pode encont ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -Para empurrar no começo, inicialize o `Array` com o tamanho zero, assim: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] ### Schema GraphQL -Isto não é uma mudança direta no AssemblyScript, mas pode ser que precise atualizar o seu arquivo `schema.graphql`. +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. Agora não há mais como definir campos nos seus tipos que são Listas Não Anuláveis. Se tiver um schema como este: @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -Adicione um `!` ao membro do tipo de Lista, como: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -Isto mudou por diferenças de anulabilidade entre versões do AssemblyScript, e tem relação ao arquivo `src/generated/schema.ts` (caminho padrão, talvez tenha mudado). +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). 
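Pulling the initialization advice from this section together, a load-or-create helper along these lines avoids the crash on uninitialized nullable fields (a sketch only — `Total` and `amount` come from the snippets above; the helper and handler names are illustrative):

```typescript
import { BigInt } from '@graphprotocol/graph-ts'
import { Total } from '../generated/schema' // default codegen output path

// Illustrative helper: guarantees `amount` is set before any arithmetic.
function loadOrCreateTotal(id: string): Total {
  let total = Total.load(id)
  if (total === null) {
    total = new Total(id)
    total.amount = BigInt.fromI32(0)
  }
  return total
}

export function handleSomething(): void {
  let total = loadOrCreateTotal('latest')
  total.amount = total.amount + BigInt.fromI32(1) // safe: initialized above
  total.save()
}
```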
-### Outras informações +### Outros -- Alinhados `Map#set` e `Set#add`, com retorno de `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Os arranjos não herdam mais do ArrayBufferView, mas agora são distintos ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Classes inicializadas de literais de objeto não podem mais definir um construtor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- O resultado de uma operação binária, se ambos os operandos forem inteiros, `**` agora é o inteiro denominador comum. Antes, o resultado era um float, como se chamasse o `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coagir o `NaN` ao `false` ao converter em `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Ao mudar um valor inteiro pequeno do tipo `i8`/`u8` ou `i16`/`u16`, apenas os 3 respectivamente 4 bits menos significantes do valor RHS afetarão o resultado, análogo ao resultado de um `i32.shl` só a ser afetado pelos 5 bits menos significantes do valor RHS. Por exemplo: `someI8 << 8` antes produzia o valor `0`, mas agora produz o `somel8` por mascarar o RHS como `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Consertado um erro de comparações relacionais de string quando os tamanhos diferem ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From d5c3fa0a11717f5b9031abc181aa56ec98019d81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:21 -0500 Subject: [PATCH 0464/1534] New translations assemblyscript-migration-guide.mdx (Russian) --- .../assemblyscript-migration-guide.mdx | 126 +++++++++--------- 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/website/src/pages/ru/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/ru/resources/release-notes/assemblyscript-migration-guide.mdx index af444be7a6e3..c52b3b97cda2 100644 --- a/website/src/pages/ru/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/ru/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: Руководство по миграции AssemblyScript --- -До сих пор для субграфов использовалась одна из [первых версий AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Наконец, мы добавили поддержку [последней доступной версии](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 Это позволит разработчикам субграфов использовать более новые возможности языка AS и стандартной библиотеки. -Это руководство применимо для всех, кто использует `graph-cli`/`graph-ts` версии ниже `0.22.0`. Если у Вас уже есть версия выше (или равная) этой, значит, Вы уже использовали версию `0.19.10` AssemblyScript 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> Примечание. Начиная с `0.24.0`, `graph-node` может поддерживать обе версии, в зависимости от `apiVersion`, указанного в манифесте субграфа. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. 
## Особенности ### Новый функционал -- Теперь `TypedArray` можно создавать, используя `ArrayBuffer`6 с помощью [нового статического метода `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Новые функции стандартной библиотеки: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` и `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Добавлена поддержка x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Добавлен `StaticArray`, более эффективный вариант массива ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Добавлен `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Реализован аргумент `radix` для `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Добавлена поддержка разделителей в литералах с плавающей точкой ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Добавлена поддержка функций первого класса ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Добавление встроенных модулей: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Внедрение `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Добавлена поддержка литеральных строк шаблона ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Добавление `encodeURI(Component)` и `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Добавление `toString`, `toDateString` и `toTimeString` к `Date` ([v0.18.29](https://github.com/ AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Добавление `toUTCString` для `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Добавление встроенного типа `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` 
([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Оптимизации -- Функции `Math`, такие как `exp`, `exp2`, `log`, `log2` и `pow` были заменены более быстрыми вариантами ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Проведена небольшая оптимизация `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Кэширование большего количества обращений к полям в std Map и Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Оптимизация по двум степеням в `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Прочее -- Тип литерала массива теперь можно определить по его содержимому ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Стандартная библиотека обновлена до версии Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## Как выполнить обновление? -1. Измените мэппинги `apiVersion` в `subgraph.yaml` на `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. Обновите используемый Вами `graph-cli` до `latest` версии, выполнив: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # если он у Вас установлен глобально @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Сделайте то же самое для `graph-ts`, но вместо глобальной установки сохраните его в своих основных зависимостях: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. Следуйте остальной части руководства, чтобы исправить языковые изменения. -5. 
Снова запустите `codegen` и `deploy`. +5. Run `codegen` and `deploy` again. ## Критические изменения @@ -110,7 +110,7 @@ maybeValue.aMethod() ### Затенение переменных -Раньше можно было сделать [затенение переменных](https://en.wikipedia.org/wiki/Variable_shadowing) и код, подобный этому, работал: +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript let a = 10 @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -Чтобы решить эту проблему, Вы можете просто изменить оператор `if` на что-то вроде этого: +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -155,7 +155,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i ### Кастинг -Раньше для кастинга обычно использовалось ключевое слово `as`, например: +The common way to do casting before was to just use the `as` keyword, like this: ```typescript let byteArray = new ByteArray(10) @@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: byteArray Однако это работает только в двух случаях: -- Примитивный кастинг (между такими типами, как `u8`, `i32`, `bool`; например: `let b: isize = 10; b as usize`); +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Укрупнение по наследованию классов (subclass → superclass) Примеры: @@ -177,55 +177,55 @@ let c: usize = a + (b as usize) ``` ```typescript -// укрупнение по наследованию классов +// upcasting on class inheritance class Bytes extends Uint8Array {} let bytes = new Bytes(2) -// bytes // то же, что: bytes as Uint8Array +// bytes // same as: bytes as Uint8Array ``` -Есть два сценария, в которых Вы можете захотеть выполнить преобразование, но использовать `as`/`var` **небезопасно**: +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: - Понижение уровня наследования классов (superclass → subclass) - Между двумя типами, имеющими общий супер класс ```typescript -// понижение уровня наследования классов +// downcasting on class inheritance class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -// uint8Array // перерывы в работе :( +// uint8Array // breaks in runtime :( ``` ```typescript -// между двумя типами, имеющими общий суперкласс +// between two types that share a superclass class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} let bytes = new Bytes(2) -// bytes // перерывы в работе :( +// bytes // breaks in runtime :( ``` -В таких случаях можно использовать функцию `changetype`: +For those cases, you can use the `changetype` function: ```typescript -// понижение уровня наследования классов +// downcasting on class inheritance class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -changetype(uint8Array) // работает :) +changetype(uint8Array) // works :) ``` ```typescript -// между двумя типами, имеющими общий суперкласс +// between two types that share a superclass class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} let bytes = new Bytes(2) -changetype(bytes) // работает :) +changetype(bytes) // works :) ``` -Если Вы просто хотите удалить значение NULL, Вы можете продолжать использовать оператор `as` (или `variable`), но помните, что значение не может быть нулевым, иначе оно сломается. 
+If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. ```typescript // удалить значение NULL @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -В случае обнуления мы рекомендуем Вам обратить внимание на [функцию проверки обнуления](https://www.assemblyscript.org/basics.html#nullability-checks), это сделает ваш код чище 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 Также мы добавили еще несколько статических методов в некоторые типы, чтобы облегчить кастинг: @@ -249,7 +249,7 @@ let newBalance = new AccountBalance(balanceId) ### Проверка нулевого значения с доступом к свойству -Чтобы применить [функцию проверки на нулевое значение](https://www.assemblyscript.org/basics.html#nullability-checks), Вы можете использовать операторы `if` или тернарный оператор (`?` и `:`) следующим образом: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -Однако это работает только тогда, когда Вы выполняете `if` / тернарную операцию для переменной, а не для доступа к свойству, например: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -Вам необходимо убедиться, что значение `total.amount` инициализировано, потому что, если Вы попытаетесь получить доступ к сумме, как в последней строке, произойдет сбой. Таким образом, Вы либо инициализируете его первым: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Или Вы можете просто изменить свою схему GraphQL, чтобы не использовать тип, допускающий значение NULL для этого свойства. 
Тогда мы инициализируем его нулем на этапе `codegen` 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -Компилятор выдаст ошибку, потому что Вам нужно либо добавить инициализатор для свойств, являющихся классами, либо добавить оператор `!`: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -451,7 +451,7 @@ export class Something { ### Инициализация массива -Класс `Array` по-прежнему принимает число для инициализации длины списка, однако Вам следует соблюдать осторожность, поскольку такие операции, как `.push`, фактически увеличивают размер, а не добавляют его в начало, например: +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -465,7 +465,7 @@ arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -Для того чтобы фактически начать, Вы должны либо инициализировать `Array` нулевым размером, следующим образом: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] ### Схема GraphQL -Это не прямое изменение AssemblyScript, но Вам, возможно, придется обновить файл `schema.graphql`. +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. Теперь Вы больше не можете определять поля в своих типах, которые являются списками, не допускающими значение NULL. Если у Вас такая схема: @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -Вам нужно добавить `!` к элементу типа List, например, так: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -Изменение произошло из-за различий в допустимости значений NULL между версиями AssemblyScript и связано с файлом `src/generated/schema.ts` (путь по умолчанию, возможно, Вы его изменили). +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). 
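For intuition, this is roughly the shape of the accessor that `codegen` emits into `src/generated/schema.ts` for a non-nullable list field. This is a sketch only — the exact output depends on your `graph-ts` version, and the `MyEntity`/`big` names are purely illustrative:

```typescript
import { Entity, Value } from '@graphprotocol/graph-ts'

// Hypothetical generated accessors for a `big: [Something!]!` field;
// real codegen output may differ in detail.
export class MyEntity extends Entity {
  get big(): string[] {
    let value = this.get('big')
    return value!.toStringArray() // non-nullable list: no null branch is generated
  }

  set big(value: string[]) {
    this.set('big', Value.fromStringArray(value))
  }
}
```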
### Прочее -- `Map#set` и `Set#add` согласованы со спецификацией, произведён возврат к `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Массивы больше не наследуются от ArrayBufferView, а являются самостоятельными ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Классы, инициализируемые из объектных литералов, больше не могут определять конструктор ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Результатом бинарной операции `**` теперь является целое число с общим знаменателем, если оба операнда являются целыми числами. Раньше результатом было число с плавающей запятой, как при вызове `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Приведение `NaN` к `false` при преобразовании в `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- При сдвиге небольшого целочисленного значения типа `i8`/`u8` или `i16`/`u16`, на результат влияют только соответственно 3 или 4 младших разряда значения RHS, аналогично тому, как при сдвиге `i32.shl` на результат влияют только 5 младших разрядов значения RHS. Пример: `someI8 << 8` ранее выдавал значение `0`, но теперь выдает значение `someI8` благодаря маскировке RHS как `8 & 7 = 0` (3 бита) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Исправлена ошибка сравнения реляционных строк при разных размерах ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From f5b29fe5da9fc46df82843bfe02ffeac91fd6191 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:22 -0500 Subject: [PATCH 0465/1534] New translations assemblyscript-migration-guide.mdx (Swedish) --- .../assemblyscript-migration-guide.mdx | 208 +++++++++--------- 1 file changed, 104 insertions(+), 104 deletions(-) diff --git a/website/src/pages/sv/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/sv/resources/release-notes/assemblyscript-migration-guide.mdx index 97c6bb95635a..fc50d5646a33 100644 --- a/website/src/pages/sv/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/sv/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: AssemblyScript Migrationsguide --- -Hittills har undergrafar använt en av de [första versionerna av AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Äntligen har vi lagt till stöd för den [nyaste tillgängliga versionen](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 Det kommer att möjliggöra för undergrafutvecklare att använda nyare funktioner i AS-språket och standardbiblioteket. -Denna guide är tillämplig för alla som använder `graph-cli`/`graph-ts` version `0.22.0` eller lägre. Om du redan är på en högre version än (eller lika med) det, har du redan använt version `0.19.10` av AssemblyScript 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> Observera: Från och med `0.24.0` kan `graph-node` stödja båda versionerna, beroende på `apiVersion` som anges i undergrafens manifest. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. 
## Funktioner ### Ny funktionalitet -- `TypedArray`s kan nu skapas från `ArrayBuffer`s med hjälp av [det nya `wrap`-statiska metoden](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nya standardbiblioteksfunktioner: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` och `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Lagt till stöd för x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Lagt till `StaticArray`, en mer effektiv varian av en array ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Lagt till `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implementerat `radix`-argumentet på `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Lagt till stöd för avskiljare i flyttal ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Lagt till stöd för funktioner av första klass ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Lägg till inbyggda funktioner: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implementera `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Lagt till stöd för mallliteralsträngar ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Lägg till `encodeURI(Component)` och `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Lägg till `toString`, `toDateString` och `toTimeString` för `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Lägg till `toUTCString` för `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Lägg till inbyggd typ `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` 
([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Optimeringar -- `Math`-funktioner som `exp`, `exp2`, `log`, `log2` och `pow` har ersatts med snabbare varianter ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Lätt optimering av `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cachea fler fältåtkomster i std Map och Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimering för potenser av två i `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Annat -- Typen för en arrayliteral kan nu härledas från dess innehåll ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Uppdaterad stdlib till Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## Hur uppgraderar du? -1. Ändra dina mappningar `apiVersion` i `subgraph.yaml` till `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. Uppdatera `graph-cli` som du använder till den `nyaste` versionen genom att köra: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # om du har den globalt installerad @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Gör samma sak för `graph-ts`, men istället för att installera globalt, spara den i dina huvudberoenden: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. Följ resten av guiden för att åtgärda språkbrytande ändringar. -5. Kör `codegen` och `deploy` igen. +5. Run `codegen` and `deploy` again. 
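Before the individual changes below, here is a minimal sketch of the single most common fix this upgrade requires — null-checking the result of `Entity.load()`. The `Gravatar` entity and the handler name are assumptions for illustration only:

```typescript
import { Gravatar } from '../generated/schema'

export function upsertGravatar(id: string, displayName: string): void {
  // Under apiVersion 0.0.6, load() returns `Gravatar | null`,
  // so the null case must be handled before any member access.
  let gravatar = Gravatar.load(id)
  if (gravatar == null) {
    gravatar = new Gravatar(id)
  }
  gravatar.displayName = displayName
  gravatar.save()
}
```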
## Språkbrytande ändringar @@ -91,17 +91,17 @@ maybeValue.aMethod(); Men i den nyare versionen, eftersom värdet är nullable, måste du kontrollera, så här: ```typescript -let maybeValue = load() +let maybeValue = load(); if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore + maybeValue.aMethod(); // `maybeValue` is not null anymore } ``` Eller gör så här: ```typescript -let maybeValue = load()! // bryts i runtime om värdet är null +let maybeValue = load()!; // bryts i runtime om värdet är null maybeValue.aMethod() ``` @@ -110,11 +110,11 @@ Om du är osäker på vilken du ska välja, rekommenderar vi alltid att använda ### Variabelskuggning -Tidigare kunde du använda [variabelskuggning](https://en.wikipedia.org/wiki/Variable_shadowing) och kod som detta skulle fungera: +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript -let a = 10 -let b = 20 +let a = 10; +let b = 20; let a = a + b ``` @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -För att lösa problemet kan du helt enkelt ändra `if`-satsen till något i den här stilen: +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -155,24 +155,24 @@ Samma gäller om du använder != istället för ==. ### Kasting -Det vanliga sättet att göra kasting tidigare var att bara använda nyckelordet `as`, som så här: +The common way to do casting before was to just use the `as` keyword, like this: ```typescript let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // motsvarande: byteArray +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray ``` Detta fungerar dock endast i två scenarier: -- Primitiv kasting (mellan typer som `u8`, `i32`, `bool`; t.ex. 
`let b: isize = 10; b as usize`); +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Uppkasting vid klassarv (underklass → överklass) Exempel: ```typescript // primitive casting -let a: usize = 10 -let b: isize = 5 +let a: usize = 10; +let b: isize = 5; let c: usize = a + (b as usize) ``` @@ -184,13 +184,13 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -Det finns två scenarier där du kan vilja casta, men att använda `as`/`var` **är inte säkert**: +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: - Downcasting vid arv av klasser (superklass → subklass) - Mellan två typer som delar en superklass ```typescript -// downcasting om klassarv +// downcasting on class inheritance class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) @@ -198,7 +198,7 @@ let uint8Array = new Uint8Array(2) ``` ```typescript -// mellan två typer som delar en superklass +// between two types that share a superclass class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} @@ -206,10 +206,10 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -I dessa fall kan du använda funktionen `changetype`: +For those cases, you can use the `changetype` function: ```typescript -// downcasting om klassarv +// downcasting on class inheritance class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) @@ -217,7 +217,7 @@ changetype(uint8Array) // works :) ``` ```typescript -// mellan två typer som delar en superklass +// between two types that share a superclass class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} @@ -225,20 +225,20 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -Om du bara vill ta bort nullability kan du fortsätta använda `as`-operatorn (eller `variable`), men se till att du vet att värdet inte kan vara null, annars kommer det att bryta. +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
```typescript // ta bort ogiltighet -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null +let previousBalance = AccountBalance.load(balanceId); // AccountBalance | null if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null + return previousBalance as AccountBalance; // safe remove null } let newBalance = new AccountBalance(balanceId) ``` -För nullbarhetsfallet rekommenderar vi att du tittar på [nullbarhetskontrollfunktionen](https://www.assemblyscript.org/basics.html#nullability-checks), den kommer att göra din kod renare 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 Vi har också lagt till några fler statiska metoder i vissa typer för att underlätta kastning, de är: @@ -249,35 +249,35 @@ Vi har också lagt till några fler statiska metoder i vissa typer för att unde ### Kontroll av nollställbarhet med tillgång till egendom -För att använda [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) kan du använda antingen `if`-satser eller den ternära operatorn (`?` och `:`) så här: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript -let something: string | null = 'data' +let something: string | null = "data"; -let somethingOrElse = something ? something : 'else' +let somethingOrElse = something ? something : "else"; // or -let somethingOrElse +let somethingOrElse; if (something) { - somethingOrElse = something + somethingOrElse = something; } else { - somethingOrElse = 'else' + somethingOrElse = "else"; } ``` -Men det fungerar bara när du gör `if` / ternary på en variabel, inte på en egenskapstillgång, som den här: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { data: string | null } -let container = new Container() -container.data = 'data' +let container = new Container(); +container.data = "data"; -let somethingOrElse: string = container.data ? container.data : 'else' // Kompilerar inte +let somethingOrElse: string = container.data ? container.data : "else"; // Kompilerar inte ``` Vilket ger detta fel: @@ -296,12 +296,12 @@ class Container { data: string | null } -let container = new Container() -container.data = 'data' +let container = new Container(); +container.data = "data"; -let data = container.data +let data = container.data; -let somethingOrElse: string = data ? data : 'else' // kompilerar helt okej :) +let somethingOrElse: string = data ? data : "else"; // kompilerar helt okej :) ``` ### Operatörsöverladdning med egenskapsaccess @@ -310,7 +310,7 @@ Om du försöker summera (till exempel) en nullable typ (från en property acces ```typescript class BigInt extends Uint8Array { - @operator('+') + @operator("+") plus(other: BigInt): BigInt { // ... 
} @@ -320,26 +320,26 @@ class Wrapper { public constructor(public n: BigInt | null) {} } -let x = BigInt.fromI32(2) -let y: BigInt | null = null +let x = BigInt.fromI32(2); +let y: BigInt | null = null; -x + y // ge kompileringsfel om ogiltighet +x + y; // ge kompileringsfel om ogiltighet -let wrapper = new Wrapper(y) +let wrapper = new Wrapper(y); -wrapper.n = wrapper.n + x // ger inte kompileringsfel som det borde +wrapper.n = wrapper.n + x; // ger inte kompileringsfel som det borde ``` Vi har öppnat en fråga om AssemblyScript-kompilatorn för detta, men om du gör den här typen av operationer i dina subgraf-mappningar bör du ändra dem så att de gör en null-kontroll innan den. ```typescript -let wrapper = new Wrapper(y) +let wrapper = new Wrapper(y); if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) + wrapper.n = BigInt.fromI32(0); } -wrapper.n = wrapper.n + x // nu är `n` garanterat ett BigInt +wrapper.n = wrapper.n + x; // nu är `n` garanterat ett BigInt ``` ### Initialisering av värde @@ -347,17 +347,17 @@ wrapper.n = wrapper.n + x // nu är `n` garanterat ett BigInt Om du har någon kod som denna: ```typescript -var value: Type // null -value.x = 10 -value.y = 'content' +var value: Type; // null +value.x = 10; +value.y = "content" ``` Det kommer att kompilera men brytas vid körning, det händer eftersom värdet inte har initialiserats, så se till att din subgraf har initialiserat sina värden, så här: ```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' +var value = new Type(); // initialized +value.x = 10; +value.y = "content" ``` Även om du har nullable properties i en GraphQL-entitet, som denna: @@ -372,29 +372,29 @@ type Total @entity { Och du har en kod som liknar den här: ```typescript -let total = Total.load('latest') +let total = Total.load("latest"); if (total === null) { - total = new Total('latest') + total = new Total("latest") } total.amount = total.amount + BigInt.fromI32(1) ``` -Du måste se till att initialisera värdet `total.amount`, för om du försöker komma åt som i den sista raden för summan, kommer det att krascha. Så antingen initialiserar du det först: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. 
So you either initialize it first: ```typescript -let total = Total.load('latest') +let total = Total.load("latest") if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) + total = new Total("latest") + total.amount = BigInt.fromI32(0); } total.tokens = total.tokens + BigInt.fromI32(1) ``` -Eller så kan du bara ändra ditt GraphQL-schema för att inte använda en nullable-typ för den här egenskapen, då initierar vi den som noll i `codegen` -steget 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -404,10 +404,10 @@ type Total @entity { ``` ```typescript -let total = Total.load('latest') +let total = Total.load("latest"); if (total === null) { - total = new Total('latest') // initierar redan icke-nullställbara egenskaper + total = new Total("latest"); // initierar redan icke-nullställbara egenskaper } total.amount = total.amount + BigInt.fromI32(1) @@ -425,7 +425,7 @@ export class Something { } ``` -Kompilatorn kommer att göra fel eftersom du antingen måste lägga till en initialiserare för de egenskaper som är klasser, eller lägga till operatorn `!`: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -435,23 +435,23 @@ export class Something { // or export class Something { - value: Thing + value: Thing; constructor(value: Thing) { - this.value = value + this.value = value; } } // or export class Something { - value!: Thing + value!: Thing; } ``` ### Initialisering av Array -Klassen `Array` accepterar fortfarande ett tal för att initiera längden på listan, men du bör vara försiktig eftersom operationer som `.push` faktiskt ökar storleken istället för att lägga till i början, till exempel: +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -465,7 +465,7 @@ Beroende på vilka typer du använder, t.ex. nullable-typer, och hur du kommer ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -För att faktiskt trycka i början bör du antingen initiera `Array` med storlek noll, så här: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] ### GraphQL-schema -Detta är inte en direkt AssemblyScript-ändring, men du kan behöva uppdatera din `schema.graphql`-fil. +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. Nu kan du inte längre definiera fält i dina typer som är Non-Nullable Lists. 
Om du har ett schema som detta: @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -Du måste lägga till en `!` till medlemmen i List-typen, så här: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -Detta ändrades på grund av skillnader i nullbarhet mellan AssemblyScript-versioner, och det är relaterat till filen `src/generated/schema.ts` (standardväg, du kanske har ändrat detta). +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). ### Annat -- Jämnade `Map#set` och `Set#add` med specifikationen, som returnerar `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Arrayer ärver inte längre från ArrayBufferView, men är nu distinkta ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Klasser som initialiseras från objektlitteraler kan inte längre definiera en konstruktor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Resultatet av en binär `**`-operation är nu det gemensamma nämnaren för heltal om båda operanderna är heltal. Tidigare var resultatet ett flyttal som om man anropade `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Tvinga `NaN` till `false` vid kastning till `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- När du skiftar en liten heltalsvärde av typ `i8`/`u8` eller `i16`/`u16`, påverkar endast de 3 respektive 4 minst signifikanta bitarna i RHS-värdet resultatet, analogt med resultatet av en `i32.shl` som endast påverkas av de 5 minst signifikanta bitarna i RHS-värdet. Exempel: `someI8 << 8` producerade tidigare värdet `0`, men producerar nu `someI8` på grund av maskeringen av RHS som `8 & 7 = 0` (3 bitar) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Buggfix för relationella strängjämförelser när storlekarna skiljer sig ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From 7f52ac0d540996b1d773798c976f6977dabb6657 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:23 -0500 Subject: [PATCH 0466/1534] New translations assemblyscript-migration-guide.mdx (Turkish) --- .../assemblyscript-migration-guide.mdx | 260 +++++++++--------- 1 file changed, 130 insertions(+), 130 deletions(-) diff --git a/website/src/pages/tr/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/tr/resources/release-notes/assemblyscript-migration-guide.mdx index a8bb2e376807..acefdc46b80a 100644 --- a/website/src/pages/tr/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/tr/resources/release-notes/assemblyscript-migration-guide.mdx @@ -1,50 +1,50 @@ --- -title: AssemblyScript Migration Guide +title: AssemblyScript Geçiş Rehberi --- -Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Şu ana kadar subgraph'ler, [AssemblyScript'in ilk versiyonlarından birini](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6) kullanıyordu. Nihayet, [en yeni versiyonu](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) için destek ekledik! 🎉 -That will enable subgraph developers to use newer features of the AS language and standard library. +Bu, subgraph geliştiricilerinin AS dilinin ve standart kütüphanenin daha yeni özelliklerini kullanmasını sağlayacak. -This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 +Bu rehber, `graph-cli`/`graph-ts` araçlarının `0.22.0` ve öncesi versiyonlarını kullanan herkes için geçerlidir. Eğer halihazırda bu versiyonun üstünde (veya ona eşit) bir versiyon kullanıyorsanız, zaten AssemblyScript'in `0.19.10` versiyonunu kullanıyorsunuz demektir 🙂 -> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. +> Not: `0.24.0` itibarıyla, `graph-node`, subgraph manifestosunda belirtilen `apiVersion`'e bağlı olarak her iki versiyonu da destekleyebilir. 
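As a pocket reference for the casting rules covered in detail later on this page, a short sketch — the class names here are invented for illustration and are not the `graph-ts` types of similar name:

```typescript
// Illustrative hierarchy; `changetype` is an AssemblyScript builtin.
class MyByteArray extends Uint8Array {}
class MyBytes extends MyByteArray {}

let arr = new MyByteArray(4)

// Upcasting (subclass → superclass) stays safe with `as`:
let asUint8 = arr as Uint8Array

// Downcasting (superclass → subclass) needs `changetype` instead:
let asBytes = changetype<MyBytes>(arr)
```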
-## Features +## Özellikler -### New functionality +### Yeni işlevsellik -- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`'ler artık [`wrap` isimli yeni statik metot](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) kullanılarak `ArrayBuffer`'lardan oluşturulabilir +- Yeni standart kütüphane fonksiyonları: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` ve `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- GenericClass'in bir örneğini doğrulamak için x instanceof GenericClass desteği eklendi ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Daha verimli bir dizi çeşidi olan `StaticArray` eklendi, ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- `Array#flat` eklendi ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- `Number#toString` fonksiyonunda `radix` argümanı desteklenmeye başlandı ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Kayan nokta literallerinde ayırıcılar için destek eklendi ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- İlk sınıf fonksiyonlar için destek eklendi ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Yerleşikler 
eklendi: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- `Array/TypedArray/String#at`' fonksiyonu geliştirildi ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Şablon literal dizeleri için destek eklendi ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- `encodeURI(Component)` ve `decodeURI(Component)` eklendi ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- `toString`, `toDateString` ve `toTimeString` metodları `Date`'e eklendi ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- `Date` için `toUTCString` eklendi ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- `nonnull/NonNullable` yerleşik türü eklendi ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) -### Optimizations +### Optimizasyonlar -- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` fonksiyonları olan `exp`, `exp2`, `log`, `log2` ve `pow`, daha hızlı varyantlarla değiştirildi ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- `Math.mod`' fonksiyonu biraz optimize edildi ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- std Map ve Set'te daha fazla alan erişiminin önbelleğe alınması sağlandı ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- İkinin katları için `ipow32/64` optimizasyonu ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -### Other +### Diğer -- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Bir dizi literalinin türü artık içeriğinden çıkarsanabilir ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- stdlib Unicode 13.0.0'a güncellendi ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -## How to upgrade? +## Nasıl yükseltilir? -1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: +1. Eşlemlerinizdeki `apiVersion` değerini `subgraph.yaml` dosyasında `0.0.6` olarak değiştirin: ```yaml ... @@ -56,30 +56,30 @@ dataSources: ... ``` -2. Update the `graph-cli` you're using to the `latest` version by running: +2. Kullanmakta olduğunuz `graph-cli`yi `latest` (en son) sürüme güncellemek için şu komutu çalıştırın: ```bash -# if you have it globally installed -npm install --global @graphprotocol/graph-cli@latest +# Eğer `graph-cli` global olarak yüklüyse +npm install --global @graphprotocol/graph-cli@latest -# or in your subgraph if you have it as a dev dependency +# veya `subgraph` içinde bir geliştirme gereksinimi olarak yüklüyse npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. 
Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: +3. Aynısını `graph-ts` için de yapın, ancak global olarak kurmak yerine ana gereksinimlerinizde kaydedin: ```bash npm install --save @graphprotocol/graph-ts@latest ``` -4. Follow the rest of the guide to fix the language breaking changes. -5. Run `codegen` and `deploy` again. +4. Dildeki uyumsuzluk sorunlarını düzeltmek için rehberin geri kalanını takip edin. +5. `codegen` ve `deploy` komutlarını tekrar çalıştırın. -## Breaking changes +## Uyumsuz değişiklikler -### Nullability +### Null olabilme durumu -On the older version of AssemblyScript, you could create code like this: +AssemblyScript'in eski bir sürümünde şu şekilde kod oluşturabilirdiniz: ```typescript function load(): Value | null { ... } @@ -88,29 +88,29 @@ let maybeValue = load(); maybeValue.aMethod(); ``` -However on the newer version, because the value is nullable, it requires you to check, like this: +Ancak, daha yeni sürümde değer null olabildiği için bunu kontrol etmenizi gerektirir, aşağıdaki gibi: ```typescript let maybeValue = load() if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore + maybeValue.aMethod() // `maybeValue` artık null değil } ``` -Or force it like this: +Veya bunu şu şekilde zorlayın: ```typescript -let maybeValue = load()! // breaks in runtime if value is null +let maybeValue = load()! // Değer null ise çalıştırma esnasında hata verir maybeValue.aMethod() ``` -If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. +Emin olamadığınızda daima güvenli sürümü kullanmanızı öneririz. Değer mevcut değilse, subgraph işleyicinizde erken bir if kontrolü yaparak işlemi sonlandırabilirsiniz. -### Variable Shadowing +### Değişken Gölgeleme -Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: +Daha önce [değişken gölgeleme](https://en.wikipedia.org/wiki/Variable_shadowing) yapabiliyordunuz ve aşağıdaki gibi bir kod çalışıyordu: ```typescript let a = 10 @@ -118,7 +118,7 @@ let b = 20 let a = a + b ``` -However now this isn't possible anymore, and the compiler returns this error: +Ancak bu artık mümkün değil ve bu kodu derlemeye çalışırsanız derleyici şu hatayı verir: ```typescript ERROR TS2451: Cannot redeclare block-scoped variable 'a' @@ -128,11 +128,11 @@ ERROR TS2451: Cannot redeclare block-scoped variable 'a' in assembly/index.ts(4,3) ``` -You'll need to rename your duplicate variables if you had variable shadowing. +Eğer değişken gölgeleme yapıyorsanız, yinelenen değişkenlerinizi yeniden adlandırmanız gerekecek. -### Null Comparisons +### Null Karşılaştırmaları -By doing the upgrade on your subgraph, sometimes you might get errors like these: +Subgraph'inizi yükselttikten sonra bazı noktalarda şu tür hatalar alabilirsiniz: ```typescript ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. 
@@ -141,122 +141,122 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -To solve you can simply change the `if` statement to something like this: +Bu hatayı çözmek için `if` ifadesini şu şekilde değiştirebilirsiniz: ```typescript if (!decimals) { - // or + // veya if (decimals === null) { ``` -The same applies if you're doing != instead of ==. +Aynısı == yerine != kullandığınızda da geçerlidir. -### Casting +### Dönüştürme -The common way to do casting before was to just use the `as` keyword, like this: +Önceden dönüştürme yapmanın yaygın yolu, `as` kelimesini şöyle kullanmaktı: ```typescript let byteArray = new ByteArray(10) let uint8Array = byteArray as Uint8Array // equivalent to: byteArray ``` -However this only works in two scenarios: +Ancak bu sadece iki senaryoda çalışır: -- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- Upcasting on class inheritance (subclass → superclass) +- Temel tür dönüşümü (`u8`, `i32`, `bool` gibi veri türleri arasında; örneğin: `let b: isize = 10; b as usize`); +- Sınıf kalıtımında yukarı doğru dönüşüm (alt sınıf → üst sınıf) Örnekler: ```typescript -// primitive casting +// temel tür dönüşümü let a: usize = 10 let b: isize = 5 let c: usize = a + (b as usize) ``` ```typescript -// upcasting on class inheritance +// sınıf kalıtımında yukarı doğru dönüşüm class Bytes extends Uint8Array {} let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array +// bytes // // aynı şöyle: bytes as Uint8Array ``` -There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: +`as`/`var` kullanmanın **güvenli olmadığı** iki dönüşüm senaryosu vardır: -- Downcasting on class inheritance (superclass → subclass) -- Between two types that share a superclass +- Sınıf kalıtımında aşağı doğru dönüşüm (üst sınıf → alt sınıf) +- Ortak bir üst sınıfa sahip iki tür arasında dönüşüm ```typescript -// downcasting on class inheritance +// sınıf kalıtımında aşağı doğru dönüşüm class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( +// uint8Array // çalıştırma esnasında kırılır :( ``` ```typescript -// between two types that share a superclass +// aynı üst sınıfı paylaşan iki tür arasında class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} let bytes = new Bytes(2) -// bytes // breaks in runtime :( +// bytes // çalıştırma esnasında kırılır :( ``` -For those cases, you can use the `changetype` function: +Bu durumlar için `changetype` fonksiyonunu kullanabilirsiniz: ```typescript -// downcasting on class inheritance +// sınıf kalıtımında aşağı doğru dönüşüm class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) +changetype(uint8Array) // çalışır :) ``` ```typescript -// between two types that share a superclass +// Aynı üst sınıfı paylaşan iki tür arasında class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} let bytes = new Bytes(2) -changetype(bytes) // works :) +changetype(bytes) // çalışır :) ``` -If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. +Sadece null olabilmeyi kaldırmak istiyorsanız, `as` operatörünü (veya `variable`) kullanmaya devam edebilirsiniz. Ancak değerin null olamayacağını bilmeniz gerekir. Aksi halde bu sorun yaratır. 
```typescript -// remove nullability +// null olabilmeyi kaldırma let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null + return previousBalance as AccountBalance // güvenli null kaldırma } let newBalance = new AccountBalance(balanceId) ``` -For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 +Null olabilme durumu için [null olabilme kontrolü özelliğine](https://www.assemblyscript.org/basics.html#nullability-checks) göz atmanızı öneririz. Bu özellikleri uygulamak, kodunuzu daha temiz hale getirecektir 🙂 -Also we've added a few more static methods in some types to ease casting, they are: +Ayrıca, dönüştürme işlemlerini kolaylaştırmak için bazı türlere birkaç statik metot daha ekledik, bunlar: - Bytes.fromByteArray - Bytes.fromUint8Array - BigInt.fromByteArray - ByteArray.fromBigInt -### Nullability check with property access +### Özellik erişimi ile null olabilme kontrolü -To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: +[Null kontrolü özelliğini](https://www.assemblyscript.org/basics.html#nullability-checks) kullanmak için, `if` ifadelerini veya üçlü operatörünü (`?` ve `:`) şu şekilde kullanabilirsiniz: ```typescript let something: string | null = 'data' let somethingOrElse = something ? something : 'else' -// or +// veya let somethingOrElse @@ -267,7 +267,7 @@ if (something) { } ``` -However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: +Ancak bu yalnızca bir değişken üzerinde `if` / üçlü operatör kullandığınızda çalışır, bir özellik erişiminde değil: ```typescript class Container { @@ -277,10 +277,10 @@ class Container { let container = new Container() container.data = 'data' -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +let somethingOrElse: string = container.data ? container.data : 'else' // derlemede hata verir ``` -Which outputs this error: +Bu hata çıktısını verir: ```typescript ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. @@ -289,7 +289,7 @@ ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/s ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``` -To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: +Bu hatayı çözmek için, özellik erişimini bir değişkene atayarak derleyicinin null kontrolünü yapabilmesini sağlayabilirsiniz: ```typescript class Container { @@ -301,12 +301,12 @@ container.data = 'data' let data = container.data -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +let somethingOrElse: string = data ? data : 'else' // sorunsuz bir şekilde derlenir :) ``` -### Operator overloading with property access +### Özellik erişimi ile operatör aşırı yükleme -If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. 
+Bir özelliğe erişimden gelen bir null değere izin veren bir türü, null olmayan bir türle toplamaya çalışırsanız AssemblyScript derleyicisi, bu değerlerden birinin null değere izin verdiği konusunda derleme zamanı uyarısı vermez. Bunun yerine, sadece sessiz bir şekilde derlemeye devam eder ve çalışma sırasında kodun kırılmasına yol açar. ```typescript class BigInt extends Uint8Array { @@ -323,14 +323,14 @@ class Wrapper { let x = BigInt.fromI32(2) let y: BigInt | null = null -x + y // give compile time error about nullability +x + y // null değer alabilme hakkında derleme hatası verir let wrapper = new Wrapper(y) -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +wrapper.n = wrapper.n + x // olması gerektiği gibi derleme hatası vermez ``` -We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. +AssemblyScript derleyicisine bu sorunu bildirdik. Ancak subgraph eşlemlerinizde bu tür işlemleri yapıyorsanız, şimdilik önce bir null değer kontrolü yapacak şekilde kodunuzu değiştirmelisiniz. ```typescript let wrapper = new Wrapper(y) @@ -339,12 +339,12 @@ if (!wrapper.n) { wrapper.n = BigInt.fromI32(0) } -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +wrapper.n = wrapper.n + x // şimdi `n`'nin bir BigInt olduğu garanti edilmiştir ``` -### Value initialization +### Değer ilklendirme -If you have any code like this: +Bunun gibi bir koda sahipseniz: ```typescript var value: Type // null @@ -352,15 +352,15 @@ value.x = 10 value.y = 'content' ``` -It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: +Kod derlenir ancak çalıştırma esnasında kırılır. Bu da değerin ilklendirilmemiş olmasından kaynaklanır. Bu yüzden subgraph'inizin değerlerini aşağıdaki gibi ilklendirdiğinden emin olun: ```typescript -var value = new Type() // initialized +var value = new Type() // ilklendirme value.x = 10 value.y = 'content' ``` -Also if you have nullable properties in a GraphQL entity, like this: +Ayrıca, bir GraphQL varlığında şunun gibi null olabilen özellikleriniz varsa: ```graphql type Total @entity { @@ -369,7 +369,7 @@ type Total @entity { } ``` -And you have code similar to this: +Ve bu koda benzer bir koda sahipseniz: ```typescript let total = Total.load('latest') @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: +`total.amount` değerini ilklendirdiğinizden emin olmanız gerekecek, çünkü toplama işlemi için son satırda erişmeye çalışırsanız, uygulama çöker. Bu yüzden ya önce ilklendirin: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 +Ya da GraphQL şemanızı bu özellik için null olabilen bir tür kullanmayacak şekilde değiştirin. 
Böylece `codegen` adımında bu değer sıfır olarak ilklendirilir 😉
 
 ```graphql
 type Total @entity {
@@ -407,15 +407,15 @@ let total = Total.load('latest')
 
 if (total === null) {
-  total = new Total('latest') // already initializes non-nullable properties
+  total = new Total('latest') // null olamayan özellikleri ilklendirir
 }
 
 total.amount = total.amount + BigInt.fromI32(1)
 ```
 
-### Class property initialization
+### Sınıf özelliği ilklendirme
 
-If you export any classes with properties that are other classes (declared by you or by the standard library) like this:
+Eğer (kendi tarafınızdan veya standart kütüphane tarafından bildirilen) başka sınıfları içeren özelliklere sahip sınıfları şu şekilde dışa aktarırsanız:
 
 ```typescript
 class Thing {}
@@ -425,14 +425,14 @@ export class Something {
 }
 ```
 
-The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator:
+Derleyici hata verecektir çünkü sınıf olan özellikler için ya bir ilklendirici eklemeniz ya da `!` operatörünü eklemeniz gerekmektedir:
 
 ```typescript
 export class Something {
   constructor(public value: Thing) {}
 }
 
-// or
+// veya
 
 export class Something {
   value: Thing
@@ -442,30 +442,30 @@ export class Something {
   }
 }
 
-// or
+// veya
 
 export class Something {
   value!: Thing
 }
 ```
 
-### Array initialization
+### Dizi ilklendirmesi
 
-The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example:
+`Array` sınıfı, listenin uzunluğunu ilklendirmek için bir sayı kabul etmeye devam eder. Ancak `.push` gibi işlemler başlangıca eklemek yerine boyutu gerçekten artıracağı için dikkatli olmalısınız, örneğin:
 
 ```typescript
 let arr = new Array<string>(5) // ["", "", "", "", ""]
 
-arr.push('something') // ["", "", "", "", "", "something"] // size 6 :(
+arr.push('something') // ["", "", "", "", "", "something"] // uzunluk 6 :(
 ```
 
-Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one:
+Kullandığınız tiplere bağlı olarak -mesela null olabilen değerler kullanıyorsunuz- ve bu tiplere erişim şeklinize göre, aşağıdaki gibi bir çalıştırma hatasıyla karşılaşabilirsiniz:
 
 ```
 ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type
 ```
 
-To actually push at the beginning you should either, initialize the `Array` with size zero, like this:
+Listenin başına "push" yapabilmek için, `Array`'i ya şu şekilde sıfır boyutla ilklendirmelisiniz:
 
 ```typescript
 let arr = new Array<string>(0) // []
@@ -473,7 +473,7 @@ let arr = new Array<string>(0) // []
 
 arr.push('something') // ["something"]
 ```
 
-Or you should mutate it via index:
+Ya da dizin kullanarak değiştirmelisiniz:
 
 ```typescript
 let arr = new Array<string>(5) // ["", "", "", "", ""]
@@ -481,11 +481,11 @@ let arr = new Array<string>(5) // ["", "", "", "", ""]
 
 arr[0] = 'something' // ["something", "", "", "", ""]
 ```
 
-### GraphQL schema
+### GraphQL şeması
 
-This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file.
+Bu doğrudan bir AssemblyScript değişikliği değildir.
Ancak `schema.graphql` dosyanızı güncellemeniz gerekebilir.
 
-Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this:
+Artık türlerinizde Non-Nullable Lists (Boş Olamayan Listeler) olarak alanlar tanımlayamazsınız. Eğer böyle bir şemanız varsa:
 
 ```graphql
 type Something @entity {
@@ -494,11 +494,11 @@ type MyEntity @entity {
   id: Bytes!
-  invalidField: [Something]! # no longer valid
+  invalidField: [Something]! # artık geçerli değil
 }
 ```
 
-You'll have to add an `!` to the member of the List type, like this:
+Listenin tür üyesine şu şekilde `!` eklemeniz gerekecek:
 
 ```graphql
 type Something @entity {
@@ -507,18 +507,18 @@ type MyEntity @entity {
   id: Bytes!
-  invalidField: [Something!]! # valid
+  invalidField: [Something!]! # geçerli
 }
 ```
 
-This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this).
+Bu değişiklik, AssemblyScript sürümleri arasındaki null olabilme farklılıklarından dolayı oldu. Ve bu değişiklik `src/generated/schema.ts` dosyasıyla ilgilidir (varsayılan yol, bunu değiştirmiş olabilirsiniz).
 
-### Other
+### Diğer
 
-- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2))
-- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0))
-- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0))
-- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0))
-- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9))
-- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0))
-- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8))
+- `Map#set` ve `Set#add` ögelerini, `this` döndürecek şekilde spesifikasyonla uyumlu hale getirdik ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2))
+- Diziler artık ArrayBufferView'den kalıtım almıyor; artık ayrı bir tür olarak tanımlanıyorlar ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0))
+- Nesne literalinden başlatılan sınıflar artık yapıcı bir metot tanımlayamaz ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0))
+- Bir `**` ikili işleminin sonucu artık her iki işlenen de tamsayı ise ortak payda tamsayısıdır. 
Önceden, sonuç `Math/f.pow` çağrılmış gibi bir float'tı ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0))
+- `bool` dönüşümünde `NaN` değeri `false` değerine zorla dönüştürülür ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9))
+- `i8`/`u8` ya da `i16`/`u16` tipindeki küçük tamsayı değerlerini kaydırırken, RHS değerinin yalnızca sırasıyla en düşük 3 ve 4 anlamlı biti sonucu etkiler. Bu, bir `i32.shl` sonucu yalnızca RHS değerinin 5 en düşük anlamlı bitinden etkilenmesine benzer. Örnek: `someI8 << 8` önceden `0` değerini üretirken, şimdi RHS'ın `8 & 7 = 0` (3 bit) olarak maskelenmesinden dolayı `someI8` değerini üretir ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0))
+- Boyutlar farklı olduğunda ilişkisel dize karşılaştırmalarında olan hatanın düzeltmesi ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8))

From ddba4702a93e77c1b779384e3804194f4ce9f037 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:52:25 -0500
Subject: [PATCH 0467/1534] New translations
 assemblyscript-migration-guide.mdx (Chinese Simplified)

---
 .../assemblyscript-migration-guide.mdx        | 112 +++++++++---------
 1 file changed, 56 insertions(+), 56 deletions(-)

diff --git a/website/src/pages/zh/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/zh/resources/release-notes/assemblyscript-migration-guide.mdx
index 622bdeef307e..45d64ae2ead8 100644
--- a/website/src/pages/zh/resources/release-notes/assemblyscript-migration-guide.mdx
+++ b/website/src/pages/zh/resources/release-notes/assemblyscript-migration-guide.mdx
@@ -2,49 +2,49 @@ title: AssemblyScript 迁移指南
 ---
 
-到目前为止,子图一直在使用 [AssemblyScript 的第一个版本](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6) 之一。 最终,我们添加了对[最新版本](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) 的支持! 🎉
+Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉
 
-这将使子图开发人员能够使用 AS 语言和标准库的更新特性。
+That will enable subgraph developers to use newer features of the AS language and standard library.
 
-本指南适用于使用 `0.22.0` 版本以下的 `graph-cli`/`graph-ts` 的任何人。 如果您已经使用了高于(或等于)该版本号的版本,那么您已经在使用 AssemblyScript 的 `0.19.10` 版本 🙂。
+This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂
 
-> 注意:从 `0.24.0` 开始,`graph-node` 可以支持这两个版本,具体取决于子图清单文件中指定的 `apiVersion`。
+> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest.
## 特征 ### 新功能 -- `TypedArray`s 现在可以使用[新的`wrap`静态方法](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1))基于`ArrayBuffer`s 构建 -- 新的标准库函数: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`和`TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- 增加了对 x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2))的支持 -- 添加了 `StaticArray`, 一种更高效的数组变体 ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- 增加了 `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- 在`Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1))上实现了`radix` 参数 -- 添加了对浮点文字中的分隔符的支持 ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- 添加了对一级函数的支持 ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- 添加内置函数:`i32/i64/f32/f64.add/sub/mul` ([ v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- 实现了`Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- 添加了对模板文字字符串的支持([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- 添加了`encodeURI(Component)` 和 `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- 将 `toString`、`toDateString` 和 `toTimeString` 添加到 `Date` (\[v0.18.29\](https://github.com/ AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- 为`Date` 添加了`toUTCString`([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- 添加 `nonnull/NonNullable` 内置类型([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` 
and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### 优化 -- `Math` 函数,例如 `exp`、`exp2`、`log`、`log2` 和 `pow` 已替换为更快的变体 ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- 些许优化了`Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- 在 std Map 和 Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) 中缓存更多字段访问 -- 在 `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2))中优化二的幂运算 +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### 其他 -- 现在可以从数组内容中推断出数组文字的类型([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- 将 stdlib 更新为 Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## 如何升级? -1. 将 `subgraph.yaml` 中的映射 `apiVersion` 更改为 `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. 通过运行以下命令,将您正在使用的 `graph-cli` 更新为 `latest` 版本: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # if you have it globally installed @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. 对 `graph-ts` 执行相同的操作,但不是全局安装,而是将其保存在您的主要依赖项中: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` -4. 参考指南的其余部分修复语言更改带来的问题。 -5. 再次运行 `codegen` 和 `deploy`。 +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. 
## 重大变化

### 可空性

在 AssemblyScript 的旧版本上,您可以创建如下代码:

```typescript
function load(): Value | null { ... }

let maybeValue = load();
maybeValue.aMethod();
```

然而在较新的版本上,因为该值可以为空,它需要您检查,如下所示:

```typescript
let maybeValue = load()

if (maybeValue) {
  maybeValue.aMethod() // `maybeValue` is not null anymore
}
```

或者像这样强制它:

```typescript
let maybeValue = load()! // breaks in runtime if value is null

maybeValue.aMethod()
```

如果您不确定选择哪个,我们建议始终使用安全版本。 如果值不存在,您可能只想在子图处理程序中使用一个带有返回值的早期 if 语句。

### 变量遮蔽

-在您可以进行 [变量遮蔽](https://en.wikipedia.org/wiki/Variable_shadowing) 之前,这样的代码可以工作:
+Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work:

```typescript
let a = 10
@@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i
  in src/mappings/file.ts(41,21)
```

-要解决此问题,您只需将 `if` 语句更改为如下所示代码:
+To solve you can simply change the `if` statement to something like this:

```typescript
if (!decimals) {
@@ -155,7 +155,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i

### 强制转换

-以前,进行强制转换的常用方法是使用 `as`关键字,如下所示:
+The common way to do casting before was to just use the `as` keyword, like this:

```typescript
let byteArray = new ByteArray(10)
@@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: byteArray

但是,这只适用于两种情况:

-- 原始类型转换(在`u8`, `i32`, `bool`等类型之间; 例如: `let b: isize = 10; b as usize`);
+- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`);
- 在类继承时向上转换(子类 → 超类)

例子:
@@ -184,7 +184,7 @@ let bytes = new Bytes(2)
// <Uint8Array>bytes // same as: bytes as Uint8Array
```

-在两种情况下,您可能希望进行类型转换,但使用 `as`/`var` **并不安全**:
+There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**:

- 在类继承时向下转换(超类 → 子类)
- 在共享超类的两种类型之间
@@ -206,7 +206,7 @@ let bytes = new Bytes(2)
// <ByteArray>bytes // breaks in runtime :(
```

-对于这些情况,您可以使用 `changetype<T>` 函数:
+For those cases, you can use the `changetype<T>` function:

```typescript
// downcasting on class inheritance
@@ -238,7 +238,7 @@ if (previousBalance != null) {
let newBalance = new AccountBalance(balanceId)
```

-对于可空性情况,我们建议查看[可空性检查功能](https://www.assemblyscript.org/basics.html#nullability-checks),它会让您的代码更简洁 🙂
+For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂

我们还在某些类型中添加了一些静态方法来简化转换,它们是:

@@ -249,7 +249,7 @@ let newBalance = new AccountBalance(balanceId)

### 使用属性访问进行可空性检查

-要使用 [可空性检查功能](https://www.assemblyscript.org/basics.html#nullability-checks),您可以使用 `if` 语句或三元运算符(`?` 和 `:`),如下所示:
+To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this:

```typescript
let something: string | null = 'data'
@@ -267,7 +267,7 @@ if (something) {
}
```

-但是,这仅在您对变量执行 `if` / 三元组而不是属性访问时才有效,如下所示:
+However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this:

```typescript
class Container {
@@ -381,7 +381,7 @@ if (total === null) {

total.amount = total.amount + BigInt.fromI32(1)
```

-您需要确保初始化 `total.amount` 值,因为如果您尝试像最后一行代码一样求和,程序将崩溃。 所以你要么先初始化它:
+You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. 
So you either initialize it first:

```typescript
let total = Total.load('latest')
@@ -394,7 +394,7 @@ if (total === null) {
total.tokens = total.tokens + BigInt.fromI32(1)
```

-或者您可以更改您的 GraphQL 模式,不给此属性赋予可为空的类型,然后您在 `codegen` 步骤中将其初始化为零 😉
+Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉

```graphql
type Total @entity {
@@ -425,7 +425,7 @@ export class Something {
}
```

-编译器会报错,因为您需要为类属性添加初始化程序,或者添加 `!` 运算符:
+The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator:

```typescript
export class Something {
@@ -451,7 +451,7 @@ export class Something {

### 数组初始化

-`Array` 类仍然接受一个数字来初始化列表的长度,但是您应该小心,因为像`.push`的操作实际上会增加大小,而不是添加到开头,例如:
+The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example:

```typescript
let arr = new Array<string>(5) // ["", "", "", "", ""]

arr.push('something') // ["", "", "", "", "", "something"] // size 6 :(
```

-根据您使用的类型,例如可以为空的类型,以及访问它们的方式,您可能会遇到类似下面这样的运行时错误:
+Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one:

```
-ERRO Handler 由于执行失败而跳过,错误: 映射在 ~ lib/array.ts,第110行,第40列中止,并且带有消息: 如果 array 是漏洞 wasm 反向跟踪,那么 Element type 必须为 null: 0:0x19c4-!~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type
+ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type
```

-要想真正在开始的时候推入,你应该将 `Array` 初始化为大小为零,如下所示:
+To actually push at the beginning you should either, initialize the `Array` with size zero, like this:

```typescript
let arr = new Array<string>(0) // []
@@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""]

### GraphQL 模式

-这不是一个直接的 AssemblyScript 更改,但是您可能需要更新 `schema.Graphql` 文件。
+This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file.

现在,您不再能够在类型中定义属于非空列表的字段。如果您有这样的模式:

```graphql
type Something @entity {
@@ -498,7 +498,7 @@ type MyEntity @entity {
}
```

-您必须向 List 类型的成员添加一个`!` ,如下所示:
+You'll have to add an `!` to the member of the List type, like this:

```graphql
type Something @entity {
@@ -511,14 +511,14 @@ type MyEntity @entity {
}
```

-AssemblyScript 版本之间的可空性差异导致了这种改变, 并且这也与 `src/generated/schema.ts`文件(默认路径,您可能已更改)有关。
+This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this).
### 其他 -- 将 `Map#set` 和 `Set#add` 与规范对齐,返回 `this` (\[v0.9.2\](https://github.com/AssemblyScript /assemblyscript/releases/tag/v0.9.2)) -- 数组不再继承自 ArrayBufferView,并且现在是完全不同的 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- 从对象字面初始化的类不能再定义构造函数([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- 如果两个操作数都是整数,则 `**` 二元运算的结果现在是公分母整数。 以前,结果是一个浮点数,就像调用 `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- 在转换为 `bool` 时强制 `NaN` 为 `false` (\[v0.14.9\](https://github.com/AssemblyScript/assemblyscript/releases/tag /v0.14.9)) -- 当移动 `i8`/`u8` 或 `i16`/`u16` 类型的小整数值时,只有 4 个 RHS 值的最低有效位中的 3 个会影响结果,类似于 `i32.shl` 的结果仅受 RHS 值的 5 个最低有效位影响。 示例:`someI8 << 8` 以前生成值 `0`,但现在由于将 RHS 屏蔽为`8 & 7 = 0` (3 比特), 而生成 `someI8`([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- 大小不同时关系字符串比较的错误修复 ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- 修复了大小不同时关系字符串比较的错误 ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From 9ee5701122bc34408163604c05517675efd25942 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:26 -0500 Subject: [PATCH 0468/1534] New translations assemblyscript-migration-guide.mdx (Urdu (Pakistan)) --- .../assemblyscript-migration-guide.mdx | 106 +++++++++--------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/website/src/pages/ur/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/ur/resources/release-notes/assemblyscript-migration-guide.mdx index 31439d43c505..8a354cb1c231 100644 --- a/website/src/pages/ur/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/ur/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: اسمبلی سکرپٹ مائیگریشن گائیڈ --- -اب تک، سب گراف [اسمبلی اسکرپٹ کے پہلے ورژن](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6) میں سے ایک استعمال کر رہے ہیں۔ آخر کار ہم نے [جدید ترین دستیاب](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) کے لیے تعاون شامل کر دیا ہے! 
🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 وہ سب گراف ڈویلپرز کو اسمبلی لینگوج اور سٹینڈرڈ لائبریری کی نئ خصوصیات استعمال کرنے پر فعال کرے گا. -یہ ہدایت نامہ `graph-cli`/`graph-ts` ورژن `0.22.0` کے نیچے استعمال کرنے والے ہر فرد پر لاگو ہوتا ہے۔ اگر آپ پہلے ہی اس سے زیادہ (یا برابر) ورژن پر ہیں، تو آپ اسمبلی اسکرپٹ کا ورژن `0.19.10` پہلے ہی استعمال کر رہے ہیں 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> نوٹ: `0.24.0` کے مطابق، `graph-node` سب گراف مینی فیسٹ میں متعین `apiVersion` کی بنیاد پر، دونوں ورژنز کو سپورٹ کر سکتا ہے. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. ## خصوصیات ### نئ خصوصیت -- `TypedArray`s اب `ArrayBuffer`s سے [نیا ` wrap` جامد طریقہ](https://www.assemblyscript.org/stdlib/typedarray.html#static-members کا استعمال کر کے بنایا جا سکتا ہے) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- لائبریری کے نئے معیاری فنکشنز: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` اور `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) کے x مثال کے لئے تعاون شامل کیا گیا -- شامل کیا گیا `StaticArray`، ایک زیادہ موثر ایرے ویرینٹ ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- شامل کیا گیا `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- `Number#toString` پر `radix` دلیل کو نافذ کیا گیا ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- فلوٹنگ پوائنٹ لٹریلز ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) میں الگ کرنے والوں کے لیے تعاون شامل کیا گیا -- فرسٹ کلاس فنکشنز ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) کے لیے شامل کردہ تعاون -- بلٹ انز شامل کریں: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- `Array/TypedArray/String#at` لاگو کریں ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- تمثیل کے لغوی سٹرنگز کے لیے شامل کیا گیا تعاون ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- `encodeURI(Component)` اور `decodeURI(ج Component)` شامل کریں ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- `toString`، `toDateString` اور `toTimeString` کو `Date` میں شامل کریں ([v0.18.29](https://github.com/ AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- `Date` کے لیے `toUTCSstring` شامل کریں ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- شامل کریں `nonnull/NonNullable` بلٹ ان قسم ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) 
([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### اصلاح -- `Math` فنکشنز جیسے `exp`، `exp2`، `log`، `log2` اور ` pow` کو تیز تر متغیرات سے بدل دیا گیا ہے ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- `Math.mod` کو تھوڑا سا بہتر بنائیں ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Std میپ اور سیٹ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) میں مزید فیلڈ رسائی کیش کریں -- `ipow32/64` میں دو کی طاقتوں کو بہتر بنائیں ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### دوسرا -- ایک ایرے کے لغوی کی قسم کا اندازہ اب اس کے مواد سے لگایا جا سکتا ہے ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Stdlib کو یونیکوڈ 13.0.0 میں اپ ڈیٹ کیا گیا ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- 
Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0))

## اپ گریڈ کیسے کریں؟

1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`:

```yaml
...
dataSources:
  ...
    mapping:
      ...
      apiVersion: 0.0.6
      ...
```

2. Update the `graph-cli` you're using to the `latest` version by running:

```bash
# if you have it globally installed
npm install --global @graphprotocol/graph-cli@latest

# or if you have it in your subgraph as a dev dependency
npm install --save-dev @graphprotocol/graph-cli@latest
```

3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies:

```bash
npm install --save @graphprotocol/graph-ts@latest
```

4. زبان کو توڑنے والی تبدیلیوں کو ٹھیک کرنے کے لیے بقیہ گائیڈ پر عمل کریں.
5. Run `codegen` and `deploy` again.

## بریکنگ تبدیلیاں

@@ -110,7 +110,7 @@ maybeValue.aMethod()

### متغیر شیڈونگ

-اس سے پہلے کہ آپ [متغیر شیڈونگ](https://en.wikipedia.org/wiki/Variable_shadowing) کر سکیں اور اس طرح کا کوڈ کام کرے گا:
+Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work:

```typescript
let a = 10
@@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i
  in src/mappings/file.ts(41,21)
```

-حل کرنے کے لیے آپ صرف `if` اسٹیٹمنٹ کو اس طرح تبدیل کر سکتے ہیں:
+To solve you can simply change the `if` statement to something like this:

```typescript
if (!decimals) {
@@ -155,7 +155,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i

### کاسٹنگ

-اس سے پہلے کاسٹ کرنے کا عام طریقہ صرف `as` کلیدی لفظ استعمال کرنا تھا، اس طرح:
+The common way to do casting before was to just use the `as` keyword, like this:

```typescript
let byteArray = new ByteArray(10)
@@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: byteArray

تاہم یہ صرف دو صورتوں میں کام کرتا ہے:

-- قدیم کاسٹنگ (قسم کے درمیان جیسے کہ `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`);
+- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`);
- کلاس وراثت پر اپکاسٹنگ (سپر کلاس → سب کلاس)

مثالیں:
@@ -184,7 +184,7 @@ let bytes = new Bytes(2)
// <Uint8Array>bytes // same as: bytes as Uint8Array
```

-دو ایسے حالات ہیں جہاں آپ کاسٹ کرنا چاہیں گے، لیکن `as`/`var` **محفوظ نہیں ہے**:
+There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**:

- طبقاتی وراثت میں کمی ( سب کلاس → سپر کلاس)
- دو قسموں کے درمیان جو ایک سپر کلاس کا اشتراک کرتے ہیں
@@ -206,7 +206,7 @@ let bytes = new Bytes(2)
// <ByteArray>bytes // breaks in runtime :(
```

-ان معاملات کے لیے، آپ `changetype<T>` فنکشن استعمال کر سکتے ہیں:
+For those cases, you can use the `changetype<T>` function:

```typescript
// downcasting on class inheritance
@@ -225,7 +225,7 @@ let bytes = new Bytes(2)
changetype<ByteArray>(bytes) // works :)
```

-اگر آپ صرف منسوخی کو ہٹانا چاہتے ہیں، تو آپ `as` آپریٹر (یا `<T>variable`) کا استعمال جاری رکھ سکتے ہیں، لیکن یقینی بنائیں کہ آپ جانتے ہیں کہ قدر کالعدم نہیں ہوسکتی دوسری صورت میں یہ ٹوٹ جائے گا.
+If you just want to remove nullability, you can keep using the `as` operator (or `<T>variable`), but make sure you know that value can't be null, otherwise it will break.
```typescript // remove nullability @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -منسوخی کے معاملے کے لیے ہم تجویز کرتے ہیں کہ [منسوخی چیک فیچر](https://www.assemblyscript.org/basics.html#nullability-checks) پر ایک نظر ڈالیں، یہ آپ کے کوڈ کو صاف کر دے گا 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 نیز ہم نے کاسٹنگ کو آسان بنانے کے لیے کچھ اقسام میں کچھ مزید جامد طریقے شامل کیے ہیں، وہ یہ ہیں: @@ -249,7 +249,7 @@ let newBalance = new AccountBalance(balanceId) ### پراپرٹی ایکسیس کے ساتھ منسوخی کی جانچ -[منسوخی چیک فیچر](https://www.assemblyscript.org/basics.html#nullability-checks) استعمال کرنے کے لیے آپ یا تو `if` اسٹیٹمنٹس یا ٹرنری استعمال کرسکتے ہیں۔ آپریٹر (`?` اور `:`) اس طرح: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -تاہم یہ صرف اس وقت کام کرتا ہے جب آپ متغیر پر `if` / ٹرنری کر رہے ہوں، نہ کہ کسی پراپرٹی تک رسائی پر، اس طرح: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -آپ کو `total.amount` ویلیو کو شروع کرنا یقینی بنانا ہوگا، کیونکہ اگر آپ رقم کے لیے آخری لائن کی طرح رسائی حاصل کرنے کی کوشش کرتے ہیں، تو یہ کریش ہوجائے گا۔ تو آپ یا تو اسے پہلے شروع کریں: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. 
So you either initialize it first:

```typescript
let total = Total.load('latest')
@@ -394,7 +394,7 @@ if (total === null) {
total.tokens = total.tokens + BigInt.fromI32(1)
```

-یا آپ اپنے GraphQL سکیما کو تبدیل کر سکتے ہیں تاکہ اس پراپرٹی کے لیے ایک غیر قابل استعمال قسم کا استعمال نہ کیا جائے، پھر ہم اسے `codegen` ویلیو پر صفر کے طور پر شروع کریں گے 😉
+Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉

```graphql
type Total @entity {
@@ -425,7 +425,7 @@ export class Something {
}
```

-کمپائلر میں غلطی ہو جائے گی کیونکہ آپ کو یا تو ان خصوصیات کے لیے ایک ابتدائی شامل کرنے کی ضرورت ہے جو کلاسز ہیں، یا `!` آپریٹر شامل کریں:
+The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator:

```typescript
export class Something {
@@ -451,7 +451,7 @@ export class Something {

### ایرے شروع کرنا

-`Array` کلاس ابھی بھی فہرست کی لمبائی کو شروع کرنے کے لیے ایک نمبر کو قبول کرتی ہے، تاہم آپ کو خیال رکھنا چاہیے کیونکہ `.push` جیسے آپریشن شروع میں شامل کرنے کے بجائے سائز میں اضافہ کریں گے، مثال کے طور پر:
+The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example:

```typescript
let arr = new Array<string>(5) // ["", "", "", "", ""]
@@ -465,7 +465,7 @@ arr.push('something') // ["", "", "", "", "", "something"] // size 6 :(
ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type
```

-اصل میں شروع میں دھکیلنے کے لیے آپ کو یا تو، `Array` کو سائز صفر کے ساتھ شروع کرنا چاہیے، اس طرح:
+To actually push at the beginning you should either, initialize the `Array` with size zero, like this:

```typescript
let arr = new Array<string>(0) // []
@@ -483,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""]

### گراف کیو ایل اسکیما

-یہ اسمبلی اسکرپٹ میں براہ راست تبدیلی نہیں ہے، لیکن آپ کو اپنی `schema.graphql` فائل کو اپ ڈیٹ کرنا پڑ سکتا ہے.
+This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file.

اب آپ اپنی اقسام میں ان فیلڈز کی وضاحت نہیں کر سکتے جو کہ غیر منسوخ فہرست ہیں۔ اگر آپ کے پاس اس طرح کا سکیما ہے:

```graphql
type Something @entity {
@@ -498,7 +498,7 @@ type MyEntity @entity {
}
```

-آپ کو فہرست کی قسم کے میمبر میں ایک `!` شامل کرنا پڑے گا، اس طرح:
+You'll have to add an `!` to the member of the List type, like this:

```graphql
type Something @entity {
@@ -511,14 +511,14 @@ type MyEntity @entity {
}
```

-یہ اسمبلی اسکرپٹ ورژنز کے درمیان منسوخی کے فرق کی وجہ سے تبدیل ہوا، اور اس کا تعلق `src/generated/schema.ts` فائل سے ہے (پہلے سے طے شدہ راستہ، ہو سکتا ہے آپ نے اسے تبدیل کیا ہو).
+This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this).
### دوسرا -- قیاس کے ساتھ منسلک `Map#set` اور `Set#add`، واپس کر رہے ہیں `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- ایریس اب ArrayBufferView سے وراثت میں نہیں ملتی ہیں، لیکن اب الگ ہیں ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- آبجیکٹ لٹریلز سے شروع کی گئی کلاسز اب کنسٹرکٹر کی وضاحت نہیں کر سکتی ہیں ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- ایک `**` بائنری آپریشن کا نتیجہ اب مشترکہ ڈینومینیٹر انٹیجر ہے اگر دونوں آپرینڈ انٹیجرز ہیں۔ اس سے پہلے، نتیجہ ایک فلوٹ تھا جیسے کہ کال کر رہا ہو `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- زبردستی `NaN` کو `false` جب `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) پر کاسٹ کریں -- `i8`/`u8` یا `i16`/`u16` قسم کی چھوٹی انٹیجر ویلیو کو منتقل کرتے وقت، بالترتیب صرف 3 کم از کم 4 RHS ویلیو کے اہم بٹس نتیجہ کو متاثر کرتے ہیں، جو کہ `i32.shl` کے نتیجہ کے مطابق صرف RHS ویلیو کے 5 کم سے کم اہم بٹس سے متاثر ہوتے ہیں۔ مثال: `someI8 << 8` نے پہلے `0` کی ویلیو تیار کی تھی، لیکن اب RHS کو `8 & 7 = 0` (3 بٹس) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- جب سائز مختلف ہوں تو متعلقہ سٹرنگ کے موازنہ کی بگ فکس ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From 324d6967436e168e44b628572207c643bcb7d1fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:27 -0500 Subject: [PATCH 0469/1534] New translations assemblyscript-migration-guide.mdx (Vietnamese) --- .../assemblyscript-migration-guide.mdx | 92 +++++++++---------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/website/src/pages/vi/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/vi/resources/release-notes/assemblyscript-migration-guide.mdx index 69c36218d8af..20f0fcfaf8e8 100644 --- a/website/src/pages/vi/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/vi/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,49 @@ title: Hướng dẫn Di chuyển AssemblyScript --- -Cho đến nay, các subgraph đang sử dụng một trong các [phiên bản đầu tiên của AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Cuối cùng, chúng tôi đã thêm hỗ trợ cho [bản mới nhất hiện có](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 Điều đó sẽ cho phép các nhà phát triển subgrap sử dụng các tính năng mới hơn của ngôn ngữ AS và thư viện chuẩn. -Hướng dẫn này có thể áp dụng cho bất kỳ ai sử dụng `graph-cli`/`graph-ts` dưới phiên bản `0.22.0`. Nếu bạn đã ở phiên bản cao hơn (hoặc bằng) với phiên bản đó, bạn đã sử dụng phiên bản`0.19.10` của AssemblyScript 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> Lưu ý: Kể từ `0.24.0`, `graph-node` có thể hỗ trợ cả hai phiên bản, tùy thuộc vào `apiVersion` được chỉ định trong tệp kê khai subgraph. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. 
## Các đặc điểm ### Chức năng mới -- `TypedArray`s bây giờ có thể được xây dựng từ `ArrayBuffer`s bằng cách sử dụng [phương pháp tĩnh `wrap` mới](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Các chức năng thư viện tiêu chuẩn mới: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Đã thêm hỗ trợ cho x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Đã thêm `StaticArray`, một biến thể mảng hiệu quả hơn ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Đã thêm `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Đã thực hiện đối số `radix` trên `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Đã thêm hỗ trợ cho dấu phân cách trong các ký tự dấu phẩy động ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Đã thêm hỗ trợ cho các chức năng hạng nhất ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) - Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Thực hiện `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Đã thêm hỗ trợ cho chuỗi ký tự mẫu ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Thêm `encodeURI(Component)` và `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Thêm `toString`, `toDateString` và `toTimeString` vào `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Thêm `toUTCString` cho `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings 
([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) - Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Tối ưu hóa -- Các chức năng `Math` như `exp`, `exp2`, `log`, `log2` và `pow` đã được thay thế bằng các biến thể nhanh hơn ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Tối ưu hóa một chút `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Lưu vào bộ nhớ cache các truy cập trường khác trong std Map và Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Tối ưu hóa cho sức mạnh của hai trong `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) ### Khác -- Kiểu của một ký tự mảng bây giờ có thể được suy ra từ nội dung của nó ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Đã cập nhật stdlib thành Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## Làm thế nào để nâng cấp? -1. Thay đổi `apiVersion` ánh xạ của bạn trong `subgraph.yaml` thành `0.0.6`: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. Cập nhật `graph-cli` bạn đang sử dụng thành phiên bản `latest` bằng cách chạy: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # nếu bạn đã cài đặt nó trên toàn cầu @@ -66,14 +66,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Làm tương tự đối với `graph-ts`, nhưng thay vì cài đặt trên toàn cầu, hãy lưu nó trong các phần phụ thuộc chính của bạn: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. Làm theo phần còn lại của hướng dẫn để sửa các thay đổi về lỗi ngôn ngữ. -5. Chạy `codegen` và `deploy` lại. +5. Run `codegen` and `deploy` again. 
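Taken together, steps 2 to 5 above amount to a short shell session. The sketch below is illustrative only: the subgraph slug is a placeholder, and the exact `graph deploy` arguments depend on where the subgraph is hosted (see `graph deploy --help`):

```bash
# Hypothetical upgrade session; the slug and any omitted deployment flags are placeholders.
npm install --global @graphprotocol/graph-cli@latest # step 2
npm install --save @graphprotocol/graph-ts@latest    # step 3
graph codegen                                        # step 5: regenerate types against apiVersion 0.0.6
graph deploy example-user/example-subgraph           # step 5: redeploy (deployment flags omitted)
```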
## Thay đổi đột phá @@ -110,7 +110,7 @@ Nếu bạn không chắc nên chọn cái nào, chúng tôi khuyên bạn nên ### Variable Shadowing (Che khuất Biến) -Trước khi bạn có thể thực hiện [che biến](https://en.wikipedia.org/wiki/Variable_shadowing) và mã như thế này sẽ hoạt động: +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript let a = 10 @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -Để giải quyết, bạn có thể chỉ cần thay đổi câu lệnh `if` thành một cái gì đó như sau: +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -155,7 +155,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i ### Ép kiểu (Casting) -Cách phổ biến để thực hiện ép kiểu trước đây là chỉ sử dụng từ khóa `as`, như sau: +The common way to do casting before was to just use the `as` keyword, like this: ```typescript let byteArray = new ByteArray(10) @@ -164,7 +164,7 @@ let uint8Array = byteArray as Uint8Array // equivalent to: byteArray Tuy nhiên, điều này chỉ hoạt động trong hai trường hợp: -- Ép kiểu nguyên bản (giữa các kiểu như `u8`, `i32`, `bool`; ví dụ: `let b: isize = 10; b as usize`); +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Upcasting về kế thừa lớp (lớp con → lớp cha) (subclass → superclass) Các ví dụ: @@ -184,7 +184,7 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -Có hai trường hợp mà bạn có thể muốn ép kiểu, nhưng việc sử dụng `as`/`var` **không an toàn**: +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: - Downcasting về kế thừa lớp (lớp con → lớp cha) (subclass → superclass) - Giữa hai loại chia sẻ lớp cha @@ -206,10 +206,10 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -Đối với những trường hợp đó, bạn có thể sử dụng hàm `changetype`: +For those cases, you can use the `changetype` function: ```typescript -// downcasting về kế thừa lớp +// downcasting on class inheritance class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -Nếu bạn chỉ muốn loại bỏ khả năng vô hiệu, bạn có thể tiếp tục sử dụng toán tử `as` (hoặc `variable`), nhưng hãy đảm bảo rằng bạn biết rằng giá trị không được rỗng (null), nếu không nó sẽ bị vỡ. +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
```typescript // loại bỏ khả năng vô hiệu @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -Đối với trường hợp vô hiệu, chúng tôi khuyên bạn nên xem xét [tính năng kiểm tra khả năng vô hiệu](https://www.assemblyscript.org/basics.html#nullability-checks), nó sẽ giúp mã của bạn sạch hơn 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 Ngoài ra, chúng tôi đã thêm một vài phương thức tĩnh trong một số kiểu để dễ dàng ép kiểu, chúng là: @@ -249,7 +249,7 @@ Ngoài ra, chúng tôi đã thêm một vài phương thức tĩnh trong một s ### Kiểm tra tính vô hiệu với quyền truy cập thuộc tính -Để sử dụng [tính năng kiểm tra tính vô hiệu](https://www.assemblyscript.org/basics.html#nullability-checks), bạn có thể sử dụng câu lệnh `if` hoặc câu lệnh ba toán tử (`?` and `:`) như thế này: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -Tuy nhiên, điều đó chỉ hoạt động khi bạn đang thực hiện `if` / ternary trên một biến, không phải trên quyền truy cập thuộc tính, như sau: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -Bạn cần đảm bảo khởi tạo giá trị `total.amount`, bởi vì nếu bạn cố gắng truy cập như ở dòng cuối cùng cho tổng, nó sẽ bị lỗi. Vì vậy, bạn có thể khởi tạo nó trước: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. 
So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Hoặc bạn chỉ có thể thay đổi lược đồ GraphQL của mình để không sử dụng kiểu nullable cho thuộc tính này, sau đó chúng tôi sẽ khởi tạo nó bằng 0 ở bước `codegen` 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -425,7 +425,7 @@ export class Something { } ``` -Trình biên dịch sẽ bị lỗi vì bạn cần thêm bộ khởi tạo cho các thuộc tính là các lớp hoặc thêm toán tử `!`: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -515,10 +515,10 @@ This changed because of nullability differences between AssemblyScript versions, ### Khác -- Căn chỉnh `Map#set` và `Set#add` với thông số kỹ thuật, trả về `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) - Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Các lớp được khởi tạo từ các ký tự đối tượng không còn có thể xác định một phương thức khởi tạo nữa ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Kết quả của phép toán nhị phân `**` bây giờ là số nguyên mẫu số chung nếu cả hai toán hạng đều là số nguyên. Trước đây, kết quả là một float như thể đang gọi `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Ép buộc `NaN` thành `false` khi ép kiểu thành `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Khi dịch chuyển một giá trị số nguyên nhỏ của kiểu `i8`/`u8` hoặc `i16`/`u16`, chỉ 3 bit tương ứng 4 bit ít quan trọng nhất của giá trị RHS ảnh hưởng đến kết quả, tương tự như kết quả của một `i32.shl` chỉ bị ảnh hưởng bởi 5 bit ít quan trọng nhất của giá trị RHS. Ví dụ: `someI8 << 8` trước đây đã tạo ra giá trị `0`, nhưng bây giờ tạo ra`someI8` do che dấu RHS là `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Sửa lỗi so sánh chuỗi quan hệ khi kích thước khác nhau ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From edfe4ec250d0052cdfc8fe735bca1cd3dedf14f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:28 -0500 Subject: [PATCH 0470/1534] New translations assemblyscript-migration-guide.mdx (Marathi) --- .../assemblyscript-migration-guide.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/mr/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/mr/resources/release-notes/assemblyscript-migration-guide.mdx index a170ebec8cda..b989c4de4c11 100644 --- a/website/src/pages/mr/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/mr/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,13 +2,13 @@ title: AssemblyScript Migration Guide --- -आत्तापर्यंत, सबग्राफ [असेंबलीस्क्रिप्टच्या पहिल्या आवृत्त्यांपैकी एक वापरत आहेत](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). शेवटी आम्ही [नवीन उपलब्ध](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) साठी समर्थन जोडले आहे! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 हे सबग्राफ विकसकांना AS भाषा आणि मानक लायब्ररीची नवीन वैशिष्ट्ये वापरण्यास सक्षम करेल. -ही मार्गदर्शक आवृत्ती `0.22.0` खालील `graph-cli`/`graph-ts` वापरणाऱ्या प्रत्येकासाठी लागू आहे. तुम्‍ही आधीच त्‍याच्‍या पेक्षा वरच्‍या (किंवा समान) आवृत्‍तीवर असल्‍यास, तुम्‍ही असेंबली स्क्रिप्‍टची `0.19.10` आवृत्ती आधीच वापरत आहात 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> टीप: `0.24.0` नुसार, सबग्राफ मॅनिफेस्टमध्ये निर्दिष्ट केलेल्या `apiVersion` वर अवलंबून `graph-node` दोन्ही आवृत्त्यांना समर्थन देऊ शकते. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. ## Features @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -तुम्हाला फक्त शून्यता काढून टाकायची असल्यास, तुम्ही `as` ऑपरेटर (किंवा `व्हेरिएबल`) वापरणे सुरू ठेवू शकता, परंतु हे मूल्य शून्य असू शकत नाही याची खात्री करा., अन्यथा तो खंडित होईल. +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
```typescript // remove nullability @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -शून्यता प्रकरणासाठी आम्ही [शून्यता तपासणी वैशिष्ट्य](https://www.assemblyscript.org/basics.html#nullability-checks) पाहण्याची शिफारस करतो, ते तुमचा कोड अधिक क्लीनर बनवेल 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 Also we've added a few more static methods in some types to ease casting, they are: @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -तुम्हाला `total.amount` मूल्य सुरू केल्याची खात्री करणे आवश्यक आहे, कारण जर तुम्ही बेरीजसाठी शेवटच्या ओळीत प्रवेश करण्याचा प्रयत्न केला तर ते क्रॅश होईल. तर तुम्ही एकतर ते प्रथम आरंभ करा: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -किंवा या मालमत्तेसाठी रद्द करता येणारा प्रकार न वापरण्यासाठी तुम्ही फक्त तुमचा GraphQL स्कीमा बदलू शकता, नंतर आम्ही ते `codegen` पायरीवर शून्य म्हणून सुरू करू 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -451,7 +451,7 @@ export class Something { ### Array initialization -`अॅरे` वर्ग अजूनही सूचीची लांबी सुरू करण्यासाठी संख्या स्वीकारतो, तथापि तुम्ही काळजी घेतली पाहिजे कारण `.push` सारखी ऑपरेशन्स सुरुवातीला जोडण्याऐवजी आकार वाढवतील., उदाहरणार्थ: +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -असेंबलीस्क्रिप्ट आवृत्त्यांमधील शून्यता भिन्नतेमुळे हे बदलले आणि ते `src/generated/schema.ts` फाइलशी संबंधित आहे (डिफॉल्ट मार्ग, तुम्ही कदाचित हे बदलले असेल). +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). ### Other - Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) - Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- दोन्ही ऑपरेंड पूर्णांक असल्यास `**` बायनरी ऑपरेशनचा परिणाम आता सामान्य भाजक पूर्णांक आहे. पूर्वी, परिणाम `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) वर कॉल केल्यासारखा फ्लोट होता +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) - Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- `i8`/`u8` किंवा `i16`/`u16` प्रकाराचे लहान पूर्णांक मूल्य हलवताना, फक्त 3 अनुक्रमे 4 किमान RHS मूल्याचे महत्त्वपूर्ण बिट्स परिणामावर परिणाम करतात, `i32.shl` च्या परिणामाप्रमाणेच RHS मूल्याच्या 5 सर्वात कमी महत्त्वपूर्ण बिट्सवर परिणाम होतो. उदाहरण: `someI8 << 8` ने पूर्वी `0` मूल्य तयार केले होते, परंतु आता `someI8` तयार करते कारण RHS ला `8 & 7 = 0` (3 बिट) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- जेव्हा आकार भिन्न असतात तेव्हा रिलेशनल स्ट्रिंग तुलनांचे दोष निराकरण ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) From 3aca7a2c0ab64e08d9f767fa5ca809fda378b4fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:29 -0500 Subject: [PATCH 0471/1534] New translations assemblyscript-migration-guide.mdx (Hindi) --- .../assemblyscript-migration-guide.mdx | 137 +++++++++--------- 1 file changed, 69 insertions(+), 68 deletions(-) diff --git a/website/src/pages/hi/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/hi/resources/release-notes/assemblyscript-migration-guide.mdx index 6bc091a4083a..2917ae036756 100644 --- a/website/src/pages/hi/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/hi/resources/release-notes/assemblyscript-migration-guide.mdx @@ -2,49 +2,50 @@ title: असेंबलीस्क्रिप्ट माइग्रेशन गाइड --- -अब तक, सबग्राफ [AssemblyScript के पहले संस्करणों](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6) में से किसी एक का उपयोग करते रहे हैं। अंत में हमने [नवीनतम उपलब्ध](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) के लिए समर्थन जोड़ा है! 🎉 +अब तक, सबग्राफ [AssemblyScript के शुरुआती संस्करणों](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) में से एक का उपयोग कर रहे थे (v0.6)। अंततः हमने सबसे [नए उपलब्ध संस्करण](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) के लिए समर्थन जोड़ दिया है! 
🎉 यह सबग्राफ डेवलपर्स को एएस भाषा और मानक पुस्तकालय की नई सुविधाओं का उपयोग करने में सक्षम करेगा। -यह मार्गदर्शिका `graph-cli`/`graph-ts` नीचे दिए गए संस्करण `0.22.0` का उपयोग करने वाले किसी भी व्यक्ति के लिए लागू है। यदि आप पहले से ही उससे अधिक (या बराबर) संस्करण पर हैं, तो आप पहले से ही असेंबलीस्क्रिप्ट 🙂 के संस्करण `0.19.10` का उपयोग कर रहे हैं +यह मार्गदर्शक उन सभी लोगों के लिए लागू है जो `graph-cli`/`graph-ts` का संस्करण `0.22.0` से कम उपयोग कर रहे हैं। यदि आप पहले से ही इस संस्करण (या उससे उच्च) पर हैं, तो आप पहले से ही AssemblyScript के संस्करण `0.19.10` का उपयोग कर रहे हैं 🙂 -> नोट: `0.24.0` के अनुसार, `ग्राफ़-नोड` सबग्राफ मेनिफ़ेस्ट में निर्दिष्ट `apiVersion` के आधार पर दोनों संस्करणों का समर्थन कर सकता है। +> `0.24.0` संस्करण से, `graph-node` दोनों संस्करणों का समर्थन कर सकता है, यह इस पर निर्भर करता है कि subgraph manifest में कौन सा `apiVersion` निर्दिष्ट किया गया है। ## विशेषताएँ ### नई कार्यक्षमता -- `TypedArray` को अब `ArrayBuffer` से [नई `रैप` स्टैटिक विधि](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) का उपयोग करके बनाया जा सकता है ([v0. 8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- नई standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` और `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray` को अब `ArrayBuffer` से बनाया जा सकता है[ नए `wrap` static method ](https://www.assemblyscript.org/stdlib/typedarray.html#static-members)का उपयोग करके ([v0.8.1] + (https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1))। +- नई मानक लाइब्रेरी फ़ंक्शन: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` और `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- GenericClass के लिए x instanceof 
समर्थन जोड़ा गया ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- `StaticArray` जो एक अधिक कुशल array प्रकार है, जोड़ा गया ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- `Array#flat` जोड़ा गया ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- `Number#toString` पर `radix` आर्गुमेंट लागू किया ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- फ्लोटिंग पॉइंट लिटरल्स में सेपरेटर्स के लिए समर्थन जोड़ा गया ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- पहली श्रेणी के फ़ंक्शन्स के लिए समर्थन जोड़ा गया ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- बिल्ट-इन जोड़ें: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- `Array/TypedArray/String#at` को लागू करें ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- टेम्पलेट लिटरल स्ट्रिंग्स के लिए सपोर्ट जोड़ा गया ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- `encodeURI(Component)` और `decodeURI(Component)` को जोड़ा गया ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- `toString`, `toDateString` और `toTimeString` को `Date` में जोड़ें ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- `toUTCString` के लिए `Date` में जोड़ें ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- `nonnull/NonNullable` बिल्ट-इन टाइप जोड़ा गया ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### अनुकूलन -- `Math` फंक्शन जैसे `exp`, `exp2`, `log`, `log2` और ` pow` को तेज़ वेरिएंट से बदल दिया गया है ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- `Math.mod` को थोड़ा अनुकूलित करें ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- एसटीडी मानचित्र और सेट ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) में कैश अधिक फ़ील्ड एक्सेस करता है -- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math` फ़ंक्शंस जैसे कि `exp`, `exp2`, `log`, `log2` और `pow` को तेज़ वेरिएंट्स से बदल दिया गया है ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- थोड़ा ऑप्टिमाइज़ करें `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- स्टैंडर्ड मैप और सेट में अधिक फ़ील्ड एक्सेस कैश करें ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- `ipow32/64`([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) में दो के शक्तियों के लिए अनुकूलित करें ### अन्य -- किसी ऐरे लिटरल के प्रकार का अनुमान अब इसकी सामग्री से लगाया जा सकता है ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- यूनिकोड 13.0.0 में अपडेट किया गया stdlib ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- एक एरे लिटरल के प्रकार को अब इसकी सामग्री से अनुमानित किया जा सकता है ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib को Unicode 13.0.0 में अपडेट किया गया ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) ## कैसे करें अपग्रेड? -1. 
अपनी मैपिंग `apiVersion` को `subgraph.yaml` में `0.0.6` में बदलें: +1. अपने मानचित्रण `सबग्राफ.yaml` में `apiVersion` को `0.0.6` में बदलें: ```yaml ... @@ -56,7 +57,7 @@ dataSources: ... ``` -2. आप जिस `graph-cli` का उपयोग कर रहे हैं उसे `नवीनतम` संस्करण में चलाकर अपडेट करें: +2. आप जो `graph-cli` उपयोग कर रहे हैं, उसे `latest` संस्करण में अपडेट करने के लिए यह चलाएँ: ```bash # if you have it globally installed @@ -66,14 +67,14 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. `graph-ts` के लिए भी ऐसा ही करें, लेकिन विश्व स्तर पर स्थापित करने के बजाय, इसे अपनी मुख्य निर्भरताओं में सहेजें: +3. यह वही करें `graph-ts`, लेकिन इसे ग्लोबली इंस्टॉल करने के बजाय, इसे अपनी मुख्य निर्भरताओं में सेव करें: ```bash npm install --save @graphprotocol/graph-ts@latest ``` 4. भाषा संबंधी परिवर्तनों को ठीक करने के लिए शेष मार्गदर्शिका का पालन करें। -5. `codegen` चलाएँ और `deploy` फिर से करें। +5. `codegen` और `deploy` को फिर से चलाएं। ## ब्रेकिंग परिवर्तन @@ -110,7 +111,7 @@ maybeValue.aMethod() ### Variable Shadowing -इससे पहले कि आप [वैरिएबल शैडोइंग](https://en.wikipedia.org/wiki/Variable_shadowing) कर पाते और इस तरह का कोड काम करेगा: +पहले आप [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) कर सकते थे और कोड इस तरह काम करता था: ```typescript let a = 10 @@ -141,7 +142,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -हल करने के लिए आप केवल `if` कथन को कुछ इस तरह से बदल सकते हैं: +समस्या को हल करने के लिए आप बस `if` स्टेटमेंट को इस प्रकार बदल सकते हैं: ```typescript if (!decimals) { @@ -155,16 +156,16 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i ### कास्टिंग -पहले कास्टिंग करने का सामान्य तरीका केवल `as` कीवर्ड का उपयोग करना था, जैसे: +पहले कास्टिंग करने का सामान्य तरीका `as` कीवर्ड का उपयोग करना था, जैसे कि इस प्रकार: ```typescript -चलो byteArray = नया ByteArray (10) -चलो uint8Array = byteArray Uint8Array के रूप में // इसके बराबर: byteArray +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // इसका समकक्ष: byteArray ``` However this only works in two scenarios: -- प्रिमिटिव कास्टिंग (`u8`, `i32`, `bool` जैसे प्रकारों के बीच; उदाहरण: `let b: isize = 10; b as use`); +- प्रिमिटिव कास्टिंग (जैसे `u8`, `i32`, `bool` प्रकारों के बीच; उदाहरण के लिए: `let b: isize = 10; b as usize`); - क्लास इनहेरिटेंस (सबक्लास → सुपरक्लास) पर अपकास्टिंग उदाहरण: @@ -177,55 +178,55 @@ let c: usize = a + (b as usize) ``` ```typescript -// upcasting on class inheritance +// क्लास इनहेरिटेंस पर उपकास्टिंग class Bytes extends Uint8Array {} let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array +// bytes // यह समान है: bytes as Uint8Array ``` -ऐसे दो परिदृश्य हैं जहां आप कास्ट करना चाहते हैं, लेकिन `as``var` **का उपयोग करना सुरक्षित नहीं है **: +यहां दो परिदृश्य हैं जहाँ आप कास्ट करना चाह सकते हैं, लेकिन `as`/`var` का उपयोग **सुरक्षित नहीं है**: - क्लास इनहेरिटेंस (सुपरक्लास → सबक्लास) पर डाउनकास्टिंग - एक सुपरक्लास साझा करने वाले दो प्रकारों के बीच ```typescript -// downcasting on class inheritance +// क्लास इनहेरिटेंस में डाउनकास्टिंग class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( +// uint8Array // रनटाइम में ब्रेक हो जाता है :( ``` ```typescript -// between two types that share a superclass +// दो प्रकारों के बीच जो एक superclass साझा करते हैं class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} let bytes = new Bytes(2) 
-// bytes // breaks in runtime :( +// bytes // रनटाइम में टूट जाता है :( ``` -उन मामलों के लिए, आप `changetype` फ़ंक्शन का उपयोग कर सकते हैं: +ऐसे मामलों के लिए, आप `changetype` फ़ंक्शन का उपयोग कर सकते हैं: ```typescript -// downcasting on class inheritance +// क्लास इनहेरिटेंस पर डाउनकास्टिंग class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) +changetype(uint8Array) // काम करता है :) ``` ```typescript -// between two types that share a superclass +// दो प्रकारों के बीच जो एक सुपरक्लास साझा करते हैं class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} -let bytes = new Bytes(2) -changetype(bytes) // works :) +let bytes = new Bytes(2); +changetype(bytes); // काम करता है :) ``` -यदि आप केवल अशक्तता को हटाना चाहते हैं, तो आप `as` ऑपरेटर (या `variable`) का उपयोग करना जारी रख सकते हैं, लेकिन सुनिश्चित करें कि आप जानते हैं कि मान शून्य नहीं हो सकता, नहीं तो टूट जाएगा। +यदि आप केवल nullability को हटाना चाहते हैं, तो आप `as` ऑपरेटर (या `variable`) का उपयोग जारी रख सकते हैं, लेकिन यह सुनिश्चित करें कि आपको पता है कि वह मान null नहीं हो सकता है, अन्यथा यह टूट जाएगा। ```typescript // remove nullability @@ -238,7 +239,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -अशक्तता मामले के लिए हम [अशक्तता जांच सुविधा](https://www.assemblyscript.org/basics.html#nullability-checks) पर एक नज़र डालने की सलाह देते हैं, यह आपके कोड को साफ कर देगा 🙂 +nullability के मामले में, हम सुझाव देते हैं कि आप [nullability check विशेषता](https://www.assemblyscript.org/basics.html#nullability-checks) पर एक नज़र डालें, यह आपकी कोड को अधिक साफ-सुथरा बना देगा 🙂 साथ ही, हमने कास्टिंग को आसान बनाने के लिए कुछ प्रकारों में कुछ और स्थैतिक विधियाँ जोड़ी हैं, वे हैं: @@ -249,7 +250,7 @@ let newBalance = new AccountBalance(balanceId) ### Nullability check with property access -[nullability-checks">nullability check सुविधा](https://www.assemblyscript.org/basics.html#nullability-checks) का उपयोग करने के लिए आप या तो `if` स्टेटमेंट या टर्नरी का उपयोग कर सकते हैं ऑपरेटर (`?` और `:`) इस तरह: +[nullability check विशेषता](https://www.assemblyscript.org/basics.html#nullability-checks) का उपयोग करने के लिए, आप या तो `if` स्टेटमेंट्स का उपयोग कर सकते हैं या टर्नरी ऑपरेटर (`?` और `:`) का उपयोग कर सकते हैं, इस प्रकार: ```typescript let something: string | null = 'data' @@ -267,7 +268,7 @@ if (something) { } ``` -हालांकि यह केवल तभी काम करता है जब आप `if` / ternary एक चर पर कर रहे हों, संपत्ति के उपयोग पर नहीं, जैसे: +हालांकि, यह केवल तब काम करता है जब आप किसी वेरिएबल पर `if` / टर्नरी का उपयोग कर रहे हों, न कि किसी प्रॉपर्टी एक्सेस पर, जैसे: ```typescript class Container { @@ -380,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -आपको `total.amount` मान को इनिशियलाइज़ करना सुनिश्चित करना होगा, क्योंकि यदि आप राशि के लिए अंतिम पंक्ति की तरह एक्सेस करने का प्रयास करते हैं, तो यह क्रैश हो जाएगा। तो आप या तो इसे पहले इनिशियलाइज़ करें: +आपको यह सुनिश्चित करना होगा कि `total.amount` मान को प्रारंभिक रूप से सेट किया जाए, क्योंकि यदि आप इसे अंतिम पंक्ति में जोड़ने के लिए एक्सेस करने का प्रयास करेंगे, तो यह क्रैश हो जाएगा। तो आपको इसे पहले प्रारंभिक रूप से सेट करना होगा। ```typescript let total = Total.load('latest') @@ -393,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -या आप इस संपत्ति के लिए एक अशक्त प्रकार का उपयोग नहीं करने के लिए अपनी ग्राफक्यूएल स्कीमा को बदल सकते हैं, फिर हम इसे `कोडजेन` चरण 😉 पर शून्य के रूप में आरंभ करेंगे +या आप अपनी GraphQL स्कीमा को इस 
प्रॉपर्टी के लिए nullable टाइप का उपयोग न करने के लिए बदल सकते हैं, फिर हम इसे `codegen` स्टेप में शून्य के रूप में इनिशियलाइज करेंगे 😉 ```graphql type Total @entity { @@ -424,7 +425,7 @@ export class Something { } ``` -कंपाइलर त्रुटि करेगा क्योंकि आपको या तो उन संपत्तियों के लिए एक इनिशियलाइज़र जोड़ने की आवश्यकता है जो कक्षाएं हैं, या `!` ऑपरेटर जोड़ें: +कंपाइलर में त्रुटि होगी क्योंकि आपको उन गुणों के लिए या तो एक प्रारंभिक मान जोड़ने की आवश्यकता है जो कक्षाएँ हैं, या `!` ऑपरेटर जोड़ने की आवश्यकता है। ```typescript export class Something { @@ -450,21 +451,21 @@ export class Something { ### सरणी आरंभीकरण -अनियमित आरंभिक `Array` वर्ग अभी भी सूची की लंबाई को प्रारंभ करने के लिए एक संख्या स्वीकार करता है, हालांकि आपको ध्यान रखना चाहिए क्योंकि `.push` जैसे संचालन वास्तव में आकार में जोड़ने के बजाय बढ़ाएंगे शुरुआत, उदाहरण के लिए:रण +`Array` क्लास अभी भी एक नंबर स्वीकार करता है जिससे सूची की लंबाई को प्रारंभिक रूप से सेट किया जा सकता है, हालांकि आपको सावधानी बरतनी चाहिए क्योंकि .`push` जैसी ऑपरेशन्स वास्तव में आकार बढ़ा देंगी बजाय इसके कि वे शुरुआत में जोड़ें, उदाहरण के लिए: ```typescript let arr = new Array(5) // ["", "", "", "", ""] -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +arr.push('something') // ["", "", "", "", "", "something"] // आकार 6 :( ``` आपके द्वारा उपयोग किए जा रहे प्रकारों के आधार पर, उदाहरण के लिए अशक्त वाले, और आप उन्हें कैसे एक्सेस कर रहे हैं, आपको इस तरह की रनटाइम त्रुटि का सामना करना पड़ सकता है: ``` -निष्पादन विफलता के कारण ERRO हैंडलर को छोड़ दिया गया, त्रुटि: ~lib/array.ts, पंक्ति 110, कॉलम 40 पर संदेश के साथ निरस्त किया गया: संदेश के साथ तत्व प्रकार अशक्त होना चाहिए यदि सरणी छिद्रपूर्ण वासम बैकट्रेस है: 0: 0x19c4 - <अज्ञात>! ~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/ @graphprotocol/graph-ts/वैश्विक/वैश्विक/id_of_type +ERRO handler निष्पादन विफलता के कारण छोड़ दिया गया, त्रुटि: ~lib/array.ts, पंक्ति 110, कॉलम 40 पर मानचित्रण बंद कर दी गई, संदेश के साथ: यदि एरे में छेद हो तो तत्व प्रकार को नल होना चाहिए wasm बैकट्रेस: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -वास्तव में शुरुआत में पुश करने के लिए, आपको `Array` आकार शून्य के साथ प्रारंभ करना चाहिए, जैसे: +शुरुआत में वास्तव में पुश करने के लिए, आपको `Array` को शून्य आकार के साथ इस प्रकार प्रारंभ करना चाहिए: ```typescript let arr = new Array(0) // [] @@ -482,7 +483,7 @@ arr[0] = 'something' // ["something", "", "", "", ""] ### ग्राफक्यूएल स्कीमा -यह सीधे असेंबलीस्क्रिप्ट परिवर्तन नहीं है, लेकिन आपको अपनी `schema.graphql` फ़ाइल को अपडेट करना पड़ सकता है। +यह एक सीधे `AssemblyScript` में परिवर्तन नहीं है, लेकिन आपको अपनी `schema.graphql` फ़ाइल को अपडेट करना पड़ सकता है। अब आप अपने प्रकारों में उन क्षेत्रों को परिभाषित नहीं कर सकते हैं जो गैर-शून्य सूची हैं। यदि आपके पास ऐसा स्कीमा है: @@ -497,7 +498,7 @@ type MyEntity @entity { } ``` -आपको सूची प्रकार के सदस्य में एक `!` जोड़ना होगा, जैसे: +आपको List प्रकार के सदस्य में ! 
जोड़ना होगा, इस तरह: ```graphql type Something @entity { @@ -510,14 +511,14 @@ type MyEntity @entity { } ``` -असेंबलीस्क्रिप्ट संस्करणों के बीच अशक्तता के अंतर के कारण यह बदल गया, और यह `src/generated/schema.ts` फ़ाइल से संबंधित है (डिफ़ॉल्ट पथ, आपने इसे बदल दिया होगा)। +यह AssemblyScript संस्करणों के बीच nullability अंतर के कारण बदला, और यह `src/generated/schema.ts` फ़ाइल से संबंधित है (डिफ़ॉल्ट पथ, आप इसे बदल सकते हैं)। ### अन्य -- विनिर्देश के साथ `Map#set` और `Set#add` संरेखित, `यह` लौटाता है ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- ऑब्जेक्ट लिटरल से प्रारंभ की गई कक्षाएं अब कंस्ट्रक्टर को परिभाषित नहीं कर सकती हैं ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- यदि दोनों ऑपरेंड पूर्णांक हैं, तो `**` बाइनरी ऑपरेशन का परिणाम अब सामान्य भाजक पूर्णांक है। पहले, परिणाम एक फ़्लोट था जैसे `Math/f.pow` ([v0 0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) को कॉल कर रहा हो.11. -- `बूल` पर कास्ट करते समय `NaN` को `गलत` पर जोर दें ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- `i8`/`u8` या `i16`/`u16` प्रकार के छोटे पूर्णांक मान को स्थानांतरित करते समय, केवल 3 क्रमशः 4 न्यूनतम RHS मान के महत्वपूर्ण बिट परिणाम को प्रभावित करते हैं, जो कि `i32.shl` के परिणाम के अनुरूप होता है, केवल RHS मान के 5 सबसे कम महत्वपूर्ण बिट्स से प्रभावित होता है। उदाहरण: `someI8 << 8` ने पहले `0` मान उत्पन्न किया था, लेकिन अब RHS को `8 & 7 = 0` (3 बिट) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- आकार भिन्न होने पर संबंधपरक स्ट्रिंग तुलनाओं का बग समाधान ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- स्पेसिफिकेशन के साथ `Map#set` और `Set#add` को संरेखित किया गया, जो `this` को लौटाता है ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- ऐरे अब ArrayBufferView से विरासत में नहीं मिलते हैं, बल्कि अब वे अलग हैं ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- वस्तु साहित्य से प्रारंभ की गई कक्षाएँ अब एक निर्माता (constructor) परिभाषित नहीं कर सकतीं ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- `**` बाइनरी ऑपरेशन का परिणाम अब सामान्य हरियाली पूर्णांक होता है यदि दोनों ऑपरेन्ड पूर्णांक होते हैं। पहले, परिणाम एक फ्लोट होता था जैसे कि `Math/f.pow` को कॉल करते समय ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- `NaN` को `false` में बदलें जब `bool` में कास्ट किया जाए ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- जब एक छोटे पूर्णांक मान को `i8`/`u8` या `i16`/`u16` प्रकार में शिफ्ट किया जाता है, तो केवल RHS मान के 3 या 4 सबसे कम महत्वपूर्ण बिट्स परिणाम को प्रभावित करते हैं, जैसे कि `i32.shl` के परिणाम को केवल RHS मान के 5 सबसे कम महत्वपूर्ण बिट्स द्वारा प्रभावित किया जाता है। उदाहरण: `someI8 << 8` पहले `0` मान उत्पन्न करता था, लेकिन अब `someI8` उत्पन्न करता है क्योंकि RHS को मास्क किया गया है जैसा कि `8 & 7 = 0` (3 बिट्स) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- वर्शन ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) में आकारों के भिन्न होने पर संबंधी स्ट्रिंग तुलना में बग सुधार From 6aaad40c27c93d1b2a55e45085454b1225be76d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 
14 Feb 2025 12:52:31 -0500 Subject: [PATCH 0472/1534] New translations graphql-validations-migration-guide.mdx (French) --- .../release-notes/graphql-validations-migration-guide.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/fr/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/fr/resources/release-notes/graphql-validations-migration-guide.mdx index 62e5435c0fc3..567231e0bedf 100644 --- a/website/src/pages/fr/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/fr/resources/release-notes/graphql-validations-migration-guide.mdx @@ -103,7 +103,7 @@ query myData { } query myData2 { - # renommer la deuxième requête + # renommer la deuxième requête name } ``` @@ -158,7 +158,7 @@ _Solution:_ ```graphql query myData($id: String) { - # conserver la variable pertinente (ici : `$id: String`) + # conserver la variable pertinente (ici : `$id: String`) id ...MyFields } @@ -259,7 +259,7 @@ query { ```graphql # Différents arguments peuvent conduire à des données différentes, -# donc nous ne pouvons pas supposer que les champs seront les mêmes. +# donc nous ne pouvons pas supposer que les champs seront les mêmes. query { dogs { doesKnowCommand(dogCommand: SIT) From 0dc504702ee9084577a453d9c86c8b72c0e92994 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:32 -0500 Subject: [PATCH 0473/1534] New translations graphql-validations-migration-guide.mdx (Spanish) --- .../release-notes/graphql-validations-migration-guide.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/es/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/es/resources/release-notes/graphql-validations-migration-guide.mdx index 55801738ddca..292c60a70cf9 100644 --- a/website/src/pages/es/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/es/resources/release-notes/graphql-validations-migration-guide.mdx @@ -406,6 +406,7 @@ query { user { id image # 'image' requiere un conjunto de selección para subcampos! 
+ } } ``` From 455ac06ba922b113e9739750a4b6479392db8a6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:35 -0500 Subject: [PATCH 0474/1534] New translations graphql-validations-migration-guide.mdx (Japanese) --- .../release-notes/graphql-validations-migration-guide.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/ja/resources/release-notes/graphql-validations-migration-guide.mdx index b004e14d9f98..9a8bcf34625f 100644 --- a/website/src/pages/ja/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/ja/resources/release-notes/graphql-validations-migration-guide.mdx @@ -521,7 +521,7 @@ query { } ``` -\_注: `@stream`、`@live`、`@defer` はサポートされていません。 +_注: `@stream`、`@live`、`@defer` はサポートされていません。 **ディレクティブは、この場所で 1 回だけ使用できます (#UniqueDirectivesPerLocationRule)** From 0b427889e3ea3e2038853d5ea5eecfc53d3f5d7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:38 -0500 Subject: [PATCH 0475/1534] New translations graphql-validations-migration-guide.mdx (Portuguese) --- .../graphql-validations-migration-guide.mdx | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/website/src/pages/pt/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/pt/resources/release-notes/graphql-validations-migration-guide.mdx index 7b94db58a11d..75c8799b9286 100644 --- a/website/src/pages/pt/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/pt/resources/release-notes/graphql-validations-migration-guide.mdx @@ -256,7 +256,8 @@ query { } ``` -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** (Campos em conflito com argumentos) +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** +(Campos em conflito com argumentos) ```graphql # Argumentos diferentes podem levar a dados diferentes, @@ -465,10 +466,10 @@ Estas referências desconhecidas devem ser consertadas: - caso contrário, remova ### Fragment: invalid spread or definition - (Fragment: espalhamento ou definição inválidos) -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** (Espalhamento de fragment inválido) +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** +(Espalhamento de fragment inválido) Um Fragment não pode ser espalhado em um tipo não aplicável. @@ -508,7 +509,8 @@ fragment inlineFragOnScalar on Dog { ### Uso de Diretivas -**Directive cannot be used at this location (#KnownDirectivesRule)** (A diretiva não pode ser usada neste local) +**Directive cannot be used at this location (#KnownDirectivesRule)** +(A diretiva não pode ser usada neste local) Apenas diretivas GraphQL (`@...`) apoiadas pela API do The Graph podem ser usadas. @@ -525,7 +527,8 @@ query { _Nota: `@stream`, `@live`, e `@defer` não têm apoio._ -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** (A diretiva só pode ser usada neste local uma vez) +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** +(A diretiva só pode ser usada neste local uma vez) As diretivas apoiadas pelo The Graph só podem ser usadas uma vez por local. 
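As a small illustration of this rule (not part of the change above), the standard `@include` directive is enough to show what is rejected:

```graphql
# Invalid: the same directive is repeated at a single location.
query dogsInvalid {
  dogs @include(if: true) @include(if: true) {
    name
  }
}

# Valid: each supported directive appears at most once per location.
query dogsValid {
  dogs @include(if: true) {
    name
  }
}
```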
From aebc9b4463b071fe81d790c9eb57442b5b8e6e5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:39 -0500 Subject: [PATCH 0476/1534] New translations graphql-validations-migration-guide.mdx (Russian) --- .../graphql-validations-migration-guide.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/ru/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/ru/resources/release-notes/graphql-validations-migration-guide.mdx index b7cb792259b3..25238b858a50 100644 --- a/website/src/pages/ru/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/ru/resources/release-notes/graphql-validations-migration-guide.mdx @@ -284,8 +284,8 @@ query { ```graphql query { - # В конце концов, у нас есть два определения "x", указывающие - # на разные поля! + # В конце концов, у нас есть два определения "x", указывающие + # на разные поля! ...A ...B } @@ -437,7 +437,7 @@ query { ```graphql query purposes { # Если в схеме "name" определено как "String", - # этот запрос не пройдёт валидацию. + # этот запрос не пройдёт валидацию. purpose(name: 1) { id } @@ -447,8 +447,8 @@ query purposes { query purposes($name: Int!) { # Если "name" определено в схеме как `String`, - # этот запрос не пройдёт валидацию, потому что - # используемая переменная имеет тип `Int` + # этот запрос не пройдёт валидацию, потому что + # используемая переменная имеет тип `Int` purpose(name: $name) { id } From ad1b8ce9b231b307fec476bed5f6dda9b956c5a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:41 -0500 Subject: [PATCH 0477/1534] New translations graphql-validations-migration-guide.mdx (Chinese Simplified) --- .../release-notes/graphql-validations-migration-guide.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/zh/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/zh/resources/release-notes/graphql-validations-migration-guide.mdx index 8c4d5c52d93a..1493a96c8f55 100644 --- a/website/src/pages/zh/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/zh/resources/release-notes/graphql-validations-migration-guide.mdx @@ -6,9 +6,9 @@ title: GraphQL验证迁移指南 “graph-节点”的早期版本不支持所有验证,并提供了更优雅的响应——因此,在出现歧义的情况下,“graph-节点”会忽略无效的GraphQL操作组件。 -GraphQL验证支持是即将推出的新功能和Graph网络规模性能的支柱。 +GraphQL验证支持是即将推出的新功能和The Graph网络规模性能的支柱。 -它还将确保查询响应的确定性,这是Graph网络的一个关键要求。 +它还将确保查询响应的确定性,这是The Graph网络的一个关键要求。 **启用GraphQL验证将中断发送到Graph API的一些现有查询**。 @@ -58,7 +58,7 @@ npx@graphql验证/cli-shttps://api-npx @graphql-validate/cli -s https://api-next 您可以尝试将查询发送到: -- `https://api-next.thegraph.com/subgraphs/id/` +- `https://api-next.thegraph.com/subgraphs/id` 或者 From 18c903b9fea0c17a094880619b6940779d17ac98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:42 -0500 Subject: [PATCH 0478/1534] New translations graphql-validations-migration-guide.mdx (Urdu (Pakistan)) --- .../release-notes/graphql-validations-migration-guide.mdx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ur/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/ur/resources/release-notes/graphql-validations-migration-guide.mdx index 569f6a35898b..3d1289e29dd2 100644 --- a/website/src/pages/ur/resources/release-notes/graphql-validations-migration-guide.mdx +++ 
b/website/src/pages/ur/resources/release-notes/graphql-validations-migration-guide.mdx @@ -4,7 +4,8 @@ title: GraphQL کی توثیق کی منتقلی گائیڈ جلد ہی `گراف نوڈ` [GraphQL توثیق کی تفصیلات](https://spec.graphql.org/June2018/#sec-Validation) کی 100% کوریج کو سپورٹ کرے گا. -`گراف نوڈ` کے پچھلے ورژن تمام توثیقوں کی حمایت نہیں کرتے تھے اور زیادہ خوبصورت جوابات فراہم کرتے تھے - لہذا، ابہام کی صورت میں، `گراف نوڈ` غلط گراف کیو ایل آپریشن کے اجزاء کو نظر انداز کر رہا تھا. +`گراف نوڈ` کے پچھلے ورژن تمام توثیقوں کی حمایت نہیں کرتے تھے اور زیادہ خوبصورت جوابات فراہم کرتے تھے - لہذا، ابہام کی صورت میں، `گراف نوڈ` غلط گراف کیو ایل + آپریشن کے اجزاء کو نظر انداز کر رہا تھا. GraphQL ویلیڈیشن سپورٹ آنے والی نئی خصوصیات اور گراف نیٹ ورک کے پیمانے پر کارکردگی کا ایک ستون ہے. @@ -20,7 +21,7 @@ GraphQL ویلیڈیشن سپورٹ آنے والی نئی خصوصیات اور آپ اپنے GraphQL آپریشنز میں کسی بھی مسئلے کو تلاش کرنے اور انہیں ٹھیک کرنے کے لیے CLI مائیگریشن ٹول استعمال کر سکتے ہیں۔ متبادل طور پر آپ `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` اینڈ پوائنٹ استعمال کرنے کے لیے اپنے GraphQL کلائنٹ کے اینڈ پوائنٹ کو اپ ڈیٹ کر سکتے ہیں۔ اس اختتامی نقطہ کے خلاف اپنے کیوریز کی جانچ کرنے سے آپ کو اپنے کیوریز میں مسائل تلاش کرنے میں مدد ملے گی. -> اگر آپ [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) یا [ GraphQL کوڈ جنریٹر] (/https://the-guild.dev) استعمال کر رہے ہیں تو تمام سب گراف کو منتقل کرنے کی ضرورت نہیں ہوگی۔ /graphql/codegen، وہ پہلے ہی اس بات کو یقینی بناتے ہیں کہ آپ کے کیوریز درست ہیں. +> اگر آپ [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) یا [ GraphQL کوڈ جنریٹر] (//https://the-guild.dev) استعمال کر رہے ہیں تو تمام سب گراف کو منتقل کرنے کی ضرورت نہیں ہوگی۔ /graphql/codegen، وہ پہلے ہی اس بات کو یقینی بناتے ہیں کہ آپ کے کیوریز درست ہیں. ## مائیگریشن CLI ٹول From 35739c62535654c3a59e87b63d432cba53fdcce5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:45 -0500 Subject: [PATCH 0479/1534] New translations curating.mdx (Romanian) --- website/src/pages/ro/resources/roles/curating.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ro/resources/roles/curating.mdx b/website/src/pages/ro/resources/roles/curating.mdx index 95881613cc02..1cc05bb7b62f 100644 --- a/website/src/pages/ro/resources/roles/curating.mdx +++ b/website/src/pages/ro/resources/roles/curating.mdx @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. 
This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. What’s the cost of updating a subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. How often can I update my subgraph? From 2bf77a7f947b69a1832ddb1ab115d2b7e0ccb95d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:46 -0500 Subject: [PATCH 0480/1534] New translations curating.mdx (French) --- .../src/pages/fr/resources/roles/curating.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/fr/resources/roles/curating.mdx b/website/src/pages/fr/resources/roles/curating.mdx index 53c40ce9b0f5..909aa9f0e848 100644 --- a/website/src/pages/fr/resources/roles/curating.mdx +++ b/website/src/pages/fr/resources/roles/curating.mdx @@ -8,31 +8,31 @@ Les Curateurs jouent un rôle essentiel dans l'économie décentralisée de The Avant que les consommateurs ne puissent interroger un subgraphs, celui-ci doit être indexé. C'est ici que la curation entre en jeu. Afin que les Indexeurs puissent gagner des frais de requête substantiels sur des subgraphs de qualité, ils doivent savoir quels subgraphs indexer. Lorsque les Curateurs signalent un subgraphs , ils indiquent aux Indexeurs qu'un subgraphs est demandé et de qualité suffisante pour être indexé. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Les Curateurs rendent le réseau The Graph efficace et le [signalement](#how-to-signal) est le processus que les Curateurs utilisent pour informer les Indexeurs qu'un subgraph est bon à indexer. 
Les Indexeurs peuvent se fier au signal d’un Curateur car, en signalant, les Curateurs mintent une part de curation (curation share) pour le subgraph, leur donnant droit à une partie des futurs frais de requête générés par ce subgraph. Les signaux des Curateurs sont représentés par des jetons ERC20 appelés Graph Curation Shares (GCS). Ceux qui veulent gagner plus de frais de requête doivent signaler leurs GRT aux subgraphs qui, selon eux, généreront un flux important de frais pour le réseau. Les Curateurs ne peuvent pas être réduits pour mauvais comportement, mais il y a une taxe de dépôt sur les Curateurs pour dissuader les mauvaises décisions pouvant nuire à l'intégrité du réseau. Les Curateurs gagneront également moins de frais de requête s'ils sélectionnent un subgraph de mauvaise qualité car il y aura moins de requêtes à traiter ou moins d'Indexeurs pour les traiter. -Le [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) garantit l'indexation de tous les subgraphs. Signaler du GRT sur un subgraph particulier attirera plus d'indexeurs. Cette incitation d'indexeurs supplémentaires à travers la curation vise à améliorer la qualité du service pour les requêtes en réduisant la latence et en améliorant la disponibilité du réseau. +L’[Indexer Sunrise Upgrade](/archived/sunrise/#what-is-the-upgrade-indexer) assure l'indexation de tous les subgraphs, toutefois, signaler des GRT sur un subgraph spécifique attirera davantage d’Indexeurs vers ce dernier. Cette incitation supplémentaire a pour but d’améliorer la qualité de service pour les requêtes en réduisant la latence et en améliorant la disponibilité du réseau. Lors du signalement, les Curateurs peuvent décider de signaler une version spécifique du subgraph ou de signaler en utilisant l'auto-migration. S'ils signalent en utilisant l'auto-migration, les parts d'un Curateur seront toujours mises à jour vers la dernière version publiée par le développeur. S'ils décident de signaler une version spécifique, les parts resteront toujours sur cette version spécifique. -Si vous avez besoin d'assistance avec la curation pour améliorer la qualité du service, veuillez envoyer une demande à l'équipe Edge & Node à l'adresse support@thegraph.zendesk.com et spécifier les subgraphs pour lesquels vous avez besoin d'assistance. +Si vous avez besoin d’aide pour la curation afin d’améliorer la qualité de service, envoyez une demande à l’équipe Edge & Node à l’adresse support@thegraph.zendesk.com en précisant les subgraphs pour lesquels vous avez besoin d’assistance. Les Indexeurs peuvent trouver des subgraphs à indexer en fonction des signaux de curation qu'ils voient dans Graph Explorer (capture d'écran ci-dessous). -![Les subgraphs d'exploration](/img/explorer-subgraphs.png) +![Subgraphs de l'Explorer](/img/explorer-subgraphs.png) ## Comment signaler -Dans l'onglet Curateur de Graph Explorer, les Curateurs pourront signaler et dé-signaler certains subgraphs en fonction des statistiques du réseau. Pour un aperçu étape par étape de la procédure à suivre dans Graph Explorer, [cliquez ici.](/subgraphs/explorer/) +Dans l'onglet Curateur de Graph Explorer, les curateurs pourront signaler et retirer leur signal sur certains subgraphs en fonction des statistiques du réseau. 
Pour un guide pas à pas expliquant comment procéder dans Graph Explorer, [cliquez ici.](/subgraphs/explorer/)

Un curateur peut choisir de signaler une version spécifique d'un subgraph ou de faire migrer automatiquement son signal vers la version de production la plus récente de ce subgraph. Ces deux stratégies sont valables et comportent leurs propres avantages et inconvénients.

-Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred.
+Le signalement sur une version spécifique est particulièrement utile lorsqu'un subgraph est utilisé par plusieurs dapps. Une dapp pourrait avoir besoin de mettre à jour régulièrement le subgraph avec de nouvelles fonctionnalités, tandis qu’une autre dapp pourrait préférer utiliser une version plus ancienne et bien testée du subgraph. Lors de la curation initiale, une taxe standard de 1 % est prélevée.

La migration automatique de votre signal vers la version de production la plus récente peut s'avérer utile pour vous assurer que vous continuez à accumuler des frais de requête. Chaque fois que vous effectuez une curation, une taxe de curation de 1 % est appliquée. Vous paierez également une taxe de curation de 0,5 % à chaque migration. Les développeurs de subgraphs sont découragés de publier fréquemment de nouvelles versions - ils doivent payer une taxe de curation de 0,5 % sur toutes les parts de curation migrées automatiquement.

-> **Remarque** : La première adresse qui signale un subgraph particulier est considérée comme le premier Curateur et devra faire un travail plus intensif en gaz que les Curateurs suivants, car le premier Curateur initialise les jetons de curation à se partager et transfère également les jetons dans le proxy de The Graph.
+> **Remarque**: La première adresse à signaler un subgraph donné est considérée comme le premier curateur et devra effectuer un travail bien plus coûteux en gas que les curateurs suivants, car le premier curateur doit initialiser les tokens de part de curation et transférer les tokens dans le proxy de The Graph.

## Retrait de vos GRT

@@ -48,7 +48,7 @@ Cependant, il est recommandé que les Curateurs laissent leur GRT signalé en pl

1. Le marché des requêtes est intrinsèquement jeune chez The Graph et il y a un risque que votre %APY soit inférieur à vos attentes en raison de la dynamique naissante du marché.
2. Frais de curation - lorsqu'un Curateur signale des GRT sur un subgraph, il doit s'acquitter d'une taxe de curation de 1%. Cette taxe est brûlée.
-3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. 
This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Un subgraph peut échouer à cause d'un bug. Un subgraph qui échoue n'accumule pas de frais de requête. Par conséquent, vous devrez attendre que le développeur corrige le bogue et déploie une nouvelle version. - Si vous êtes abonné à la version la plus récente d'un subgraph, vos parts migreront automatiquement vers cette nouvelle version. Cela entraînera une taxe de curation de 0,5 %. - Si vous avez signalé sur une version spécifique d'un subgraph et qu'elle échoue, vous devrez brûler manuellement vos parts de curation. Vous pouvez alors signaler sur la nouvelle version du subgraph, encourant ainsi une taxe de curation de 1%. @@ -61,14 +61,14 @@ En signalant sur un subgraph, vous gagnerez une part de tous les frais de requê ### 2. Comment décider quels sont les subgraphs de haute qualité sur lesquels on peut émettre un signal ? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Identifier des subgraphs de haute qualité est une tâche complexe, mais il existe de multiples approches.. En tant que Curateur, vous souhaitez trouver des subgraphs fiables qui génèrent un volume de requêtes élevé. Un subgraph fiable peut être précieux s’il est complet, précis et s’il répond aux besoins en données d’une dapp. Un subgraph mal conçu pourrait avoir besoin d'être révisé ou republié, et peut aussi finir par échouer. Il est crucial pour les Curateurs d'examiner l'architecture ou le code d'un subgraph afin d'évaluer sa valeur. Ainsi : - Les Curateurs peuvent utiliser leur compréhension d'un réseau pour essayer de prédire comment un subgraph individuel peut générer un volume de requêtes plus élevé ou plus faible à l'avenir - Les Curateurs doivent également comprendre les métriques disponibles via Graph Explorer. Des métriques telles que le volume de requêtes passées et l'identité du développeur du subgraph peuvent aider à déterminer si un subgraph mérite ou non d'être signalé. ### 3. Quel est le coût de la mise à jour d'un subgraph ? -La migration de vos parts de curation vers une nouvelle version de subgraph entraîne une taxe de curation de 1%. Les Curateurs peuvent choisir de s'abonner à la version la plus récente d'un subgraph. Lorsque les parts de Curateur sont auto-migrées vers une nouvelle version, les Curateurs paieront également une demi-taxe de curation, soit 0,5%, car la mise à jour des subgraphs est une action on-chain qui coûte des frais de gaz. +La migration de vos parts de curation (curation shares) vers une nouvelle version de subgraph entraîne une taxe de curation de 1 %. Les Curateurs peuvent choisir de s'abonner à la dernière version d'un subgraph. 
Lorsque les parts de Curateurs sont automatiquement migrées vers une nouvelle version, les Curateurs paieront également une demi-taxe de curation, soit 0,5 %, car la mise à niveau (upgrade) des subgraphs est une action onchain qui coûte du gas. ### 4. À quelle fréquence puis-je mettre à jour mon subgraph ? From 910f0f6f2dedda91ce320166d648425835d30120 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:47 -0500 Subject: [PATCH 0481/1534] New translations curating.mdx (Spanish) --- website/src/pages/es/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/es/resources/roles/curating.mdx b/website/src/pages/es/resources/roles/curating.mdx index 58e9908bce71..da189f62bf69 100644 --- a/website/src/pages/es/resources/roles/curating.mdx +++ b/website/src/pages/es/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Subgrafos del Explorador](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## Cómo señalar @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. El mercado de consultas es inherentemente joven en The Graph y existe el riesgo de que su APY (Rentabilidad anualizada) sea más bajo de lo esperado debido a la dinámica del mercado que recién está empezando. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Un subgrafo puede fallar debido a un error. Un subgrafo fallido no acumula tarifas de consulta. Como resultado, tendrás que esperar hasta que el desarrollador corrija el error e implemente una nueva versión. - Si estás suscrito a la versión más reciente de un subgrafo, tus acciones se migrarán automáticamente a esa nueva versión. Esto incurrirá un impuesto de curación del 0.5%. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. What’s the cost of updating a subgraph? 
-Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. How often can I update my subgraph? From b28222f1f84a6b79d6099228526789e932177d99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:48 -0500 Subject: [PATCH 0482/1534] New translations curating.mdx (Arabic) --- website/src/pages/ar/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ar/resources/roles/curating.mdx b/website/src/pages/ar/resources/roles/curating.mdx index 64b1388acf30..d2f355055aac 100644 --- a/website/src/pages/ar/resources/roles/curating.mdx +++ b/website/src/pages/ar/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![مستكشف الفرعيةرسم بياني](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## كيفية الإشارة @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. سوق الاستعلام يعتبر حديثا في The Graph وهناك خطر من أن يكون٪ APY الخاص بك أقل مما تتوقع بسبب ديناميكيات السوق الناشئة. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. يمكن أن يفشل ال subgraph بسبب خطأ. ال subgraph الفاشل لا يمكنه إنشاء رسوم استعلام. نتيجة لذلك ، سيتعين عليك الانتظار حتى يصلح المطور الخطأ وينشر إصدارا جديدا. - إذا كنت مشتركا في أحدث إصدار من subgraph ، فسيتم ترحيل migrate أسهمك تلقائيا إلى هذا الإصدار الجديد. هذا سيتحمل ضريبة تنسيق بنسبة 0.5٪. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. 
@@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. What’s the cost of updating a subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. How often can I update my subgraph? From 09ca38eeeff87ed504540fdcac369ba203a13af7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:49 -0500 Subject: [PATCH 0483/1534] New translations curating.mdx (Czech) --- website/src/pages/cs/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/cs/resources/roles/curating.mdx b/website/src/pages/cs/resources/roles/curating.mdx index 111e9b5cc1d6..c8b9caf18e2e 100644 --- a/website/src/pages/cs/resources/roles/curating.mdx +++ b/website/src/pages/cs/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Podgrafy průzkumníka](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## Jak signalizovat @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. Trh s dotazy je v Graf ze své podstaty mladý a existuje riziko, že vaše %APY může být nižší, než očekáváte, v důsledku dynamiky rodícího se trhu. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Podgraf může selhat kvůli chybě. Za neúspěšný podgraf se neúčtují poplatky za dotaz. V důsledku toho budete muset počkat, až vývojář chybu opraví a nasadí novou verzi. - Pokud jste přihlášeni k odběru nejnovější verze podgrafu, vaše sdílené položky se automaticky přemigrují na tuto novou verzi. 
Při tom bude účtována 0,5% kurátorská daň. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. Jaké jsou náklady na aktualizaci podgrafu? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. Jak často mohu svůj podgraf aktualizovat? From 06b8e138111dd6f9a86753b5f5781165d48633ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:50 -0500 Subject: [PATCH 0484/1534] New translations curating.mdx (German) --- .../src/pages/de/resources/roles/curating.mdx | 92 +++++++++---------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/website/src/pages/de/resources/roles/curating.mdx b/website/src/pages/de/resources/roles/curating.mdx index 95881613cc02..7d145d84ab5e 100644 --- a/website/src/pages/de/resources/roles/curating.mdx +++ b/website/src/pages/de/resources/roles/curating.mdx @@ -1,89 +1,89 @@ --- -title: Curating +title: Kuratieren --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Kuratoren sind entscheidend für die dezentrale Wirtschaft von The Graph. Sie nutzen ihr Wissen über das web3-Ökosystem, um die Subgraphen zu bewerten und zu signalisieren, die von The Graph Network indiziert werden sollten. Über den Graph Explorer sehen die Kuratoren die Netzwerkdaten, um Signalisierungsentscheidungen zu treffen. Im Gegenzug belohnt The Graph Network Kuratoren, die auf qualitativ hochwertige Subgraphen hinweisen, mit einem Anteil an den Abfragegebühren, die diese Subgraphen generieren. Die Höhe der signalisierten GRT ist eine der wichtigsten Überlegungen für Indexer bei der Entscheidung, welche Subgraphen indiziert werden sollen. -## What Does Signaling Mean for The Graph Network? +## Was bedeutet Signalisierung für The Graph Network? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Bevor Verbraucher einen Subgraphen abfragen können, muss er indiziert werden. An dieser Stelle kommt die Kuratierung ins Spiel. 
Damit Indexer erhebliche Abfragegebühren für hochwertige Subgraphen verdienen können, müssen sie wissen, welche Subgraphen indiziert werden sollen. Wenn Kuratoren ein Signal für einen Subgraphen geben, wissen Indexer, dass ein Subgraph gefragt und von ausreichender Qualität ist, so dass er indiziert werden sollte. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Kuratoren machen das The Graph Netzwerk effizient und [signaling](#how-to-signal) ist der Prozess, den Kuratoren verwenden, um Indexer wissen zu lassen, dass ein Subgraph gut zu indizieren ist. Indexer können dem Signal eines Kurators vertrauen, da Kuratoren nach dem Signalisieren einen Kurationsanteil für den Subgraphen prägen, der sie zu einem Teil der zukünftigen Abfragegebühren berechtigt, die der Subgraph verursacht. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Die Signale der Kuratoren werden als ERC20-Token dargestellt, die Graph Curation Shares (GCS) genannt werden. Diejenigen, die mehr Abfragegebühren verdienen wollen, sollten ihre GRT an Subgraphen signalisieren, von denen sie vorhersagen, dass sie einen starken Gebührenfluss an das Netzwerk generieren werden. Kuratoren können nicht für schlechtes Verhalten bestraft werden, aber es gibt eine Einlagensteuer für Kuratoren, um von schlechten Entscheidungen abzuschrecken, die der Integrität des Netzwerks schaden könnten. Kuratoren werden auch weniger Abfragegebühren verdienen, wenn sie einen Subgraphen von geringer Qualität kuratieren, weil es weniger Abfragen zu bearbeiten gibt oder weniger Indexer, die sie bearbeiten. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +Der [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) stellt die Indizierung aller Subgraphen sicher und signalisiert, dass GRT auf einem bestimmten Subgraphen mehr Indexer anzieht. Dieser Anreiz für zusätzliche Indexer durch Kuration zielt darauf ab, die Servicequalität für Abfragen zu verbessern, indem die Latenzzeit verringert und die Netzwerkverfügbarkeit erhöht wird. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. 
+Bei der Signalisierung können Kuratoren entscheiden, ob sie für eine bestimmte Version des Subgraphen signalisieren wollen oder ob sie die automatische Migration verwenden wollen. Bei der automatischen Migration werden die Freigaben eines Kurators immer auf die neueste vom Entwickler veröffentlichte Version aktualisiert. Wenn sie sich stattdessen für eine bestimmte Version entscheiden, bleiben die Freigaben immer auf dieser spezifischen Version. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +Wenn Sie Unterstützung bei der Kuratierung benötigen, um die Qualität des Dienstes zu verbessern, senden Sie bitte eine Anfrage an das Edge & Node-Team unter support@thegraph.zendesk.com und geben Sie die Subgraphen an, für die Sie Unterstützung benötigen. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). +Indexer können Subgraphen für die Indizierung auf der Grundlage von Kurationssignalen finden, die sie im Graph Explorer sehen (siehe Screenshot unten). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer-Subgrafen](/img/explorer-subgraphs.png) -## How to Signal +## Wie man signalisiert -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Auf der Registerkarte Kurator im Graph Explorer können Kuratoren bestimmte Subgraphen auf der Grundlage von Netzwerkstatistiken an- und abmelden. Einen schrittweisen Überblick über die Vorgehensweise im Graph Explorer finden Sie [hier](/subgraphs/explorer/) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +Ein Kurator kann sich dafür entscheiden, ein Signal für eine bestimmte Subgraph-Version abzugeben, oder er kann sein Signal automatisch auf die neueste Produktionsversion dieses Subgraphen migrieren lassen. Beides sind gültige Strategien und haben ihre eigenen Vor- und Nachteile. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Die Signalisierung einer bestimmten Version ist besonders nützlich, wenn ein Subgraph von mehreren Dapps verwendet wird. Eine Dapp muss den Subgraph vielleicht regelmäßig mit neuen Funktionen aktualisieren. Eine andere Dapp zieht es vielleicht vor, eine ältere, gut getestete Version des Subgraphs zu verwenden. Bei der ersten Kuration fällt eine Standardsteuer von 1% an. -Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. 
+Die automatische Migration Ihres Signals zum neuesten Produktions-Build kann sich als nützlich erweisen, um sicherzustellen, dass Sie weiterhin Abfragegebühren anfallen. Jedes Mal, wenn Sie kuratieren, fällt eine Kuratierungssteuer von 1 % an. Außerdem zahlen Sie bei jeder Migration eine Kuratierungssteuer von 0,5 %. Subgraph-Entwickler werden davon abgehalten, häufig neue Versionen zu veröffentlichen - sie müssen eine Kurationssteuer von 0,5 % auf alle automatisch migrierten Kurationsanteile zahlen. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Anmerkung**: Die erste Adresse, die einen bestimmten Subgraph signalisiert, wird als erster Kurator betrachtet und muss viel mehr Arbeit leisten als die übrigen folgenden Kuratoren, da der erste Kurator die Kurationsaktien-Token initialisiert und außerdem Token in den Graph-Proxy überträgt. -## Withdrawing your GRT +## Abhebung Ihrer GRT -Curators have the option to withdraw their signaled GRT at any time. +Die Kuratoren haben jederzeit die Möglichkeit, ihre signalisierten GRT zurückzuziehen. -Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). +Anders als beim Delegieren müssen Sie, wenn Sie sich entscheiden, Ihr signalisiertes GRT abzuheben, keine Abkühlungsphase abwarten und erhalten den gesamten Betrag (abzüglich der 1 % Kurationssteuer). -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Sobald ein Kurator sein Signal zurückzieht, können die Indexer den Subgraphen weiter indizieren, auch wenn derzeit kein aktives GRT signalisiert wird. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +Es wird jedoch empfohlen, dass Kuratoren ihr signalisiertes GRT bestehen lassen, nicht nur um einen Teil der Abfragegebühren zu erhalten, sondern auch um die Zuverlässigkeit und Betriebszeit des Subgraphen zu gewährleisten. -## Risks +## Risiken -1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. 
- - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +1. Der Abfragemarkt ist bei The Graph noch sehr jung, und es besteht das Risiko, dass Ihr %APY aufgrund der noch jungen Marktdynamik niedriger ist als Sie erwarten. +2. Kurationsgebühr - wenn ein Kurator GRT auf einem Subgraphen meldet, fällt eine Kurationsgebühr von 1% an. Diese Gebühr wird verbrannt. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. Ein Subgraph kann aufgrund eines Fehlers fehlschlagen. Für einen fehlgeschlagenen Subgraph fallen keine Abfragegebühren an. Daher müssen Sie warten, bis der Entwickler den Fehler behebt und eine neue Version bereitstellt. + - Wenn Sie die neueste Version eines Subgraphen abonniert haben, werden Ihre Anteile automatisch zu dieser neuen Version migriert. Dabei fällt eine Kurationsgebühr von 0,5 % an. + - Wenn Sie für eine bestimmte Version eines Subgraphen ein Signal gegeben haben und dieses fehlschlägt, müssen Sie Ihre Kurationsanteile manuell verbrennen. Sie können dann ein Signal für die neue Subgraph-Version geben, wodurch eine Kurationssteuer von 1 % anfällt. -## Curation FAQs +## FAQs zur Kuration -### 1. What % of query fees do Curators earn? +### 1. Wie viel Prozent der Abfragegebühren verdienen die Kuratoren? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +Durch das Signalisieren auf einem Subgraphen erhalten Sie einen Anteil an allen Abfragegebühren, die der Subgraph generiert. 10 % aller Abfragegebühren gehen an die Kuratoren im Verhältnis zu ihren Kurationsanteilen. Diese 10 % unterliegen der Governance. -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. Wie entscheide ich, welche Subgraphen qualitativ hochwertig sind, um sie zu signalisieren? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Die Suche nach qualitativ hochwertigen Subgraphen ist eine komplexe Aufgabe, die auf viele verschiedene Arten angegangen werden kann. Als Kurator möchten Sie nach vertrauenswürdigen Subgraphen suchen, die das Abfragevolumen erhöhen. Ein vertrauenswürdiger Subgraph kann wertvoll sein, wenn er vollständig und genau ist und die Datenanforderungen einer App unterstützt. Ein schlecht entworfener Subgraph muss möglicherweise überarbeitet oder neu veröffentlicht werden und kann auch scheitern. 
Es ist wichtig, dass die Kuratoren die Architektur oder den Code eines Subgraphen überprüfen, um zu beurteilen, ob ein Subgraph wertvoll ist. Daraus folgt: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Kuratoren können ihr Verständnis eines Netzwerks nutzen, um vorherzusagen, wie ein einzelner Subgraph in der Zukunft ein höheres oder niedrigeres Suchvolumen generieren könnte +- Kuratoren sollten auch die Metriken verstehen, die über den Graph Explorer verfügbar sind. Metriken wie das vergangene Abfragevolumen und die Person des Subgraph-Entwicklers können dabei helfen, festzustellen, ob ein Subgraph eine Meldung wert ist oder nicht. -### 3. What’s the cost of updating a subgraph? +### 3. Wie hoch sind die Kosten für die Aktualisierung eines Subgraphen? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Wenn Sie Ihre Kuratorenanteile auf eine neue Subgraph-Version migrieren, fällt eine Kuratorensteuer von 1 % an. Kuratoren können sich dafür entscheiden, die neueste Version eines Subgraphen zu abonnieren. Wenn Kuratorenanteile automatisch auf eine neue Version migriert werden, zahlen Kuratoren ebenfalls die Hälfte der Kurationssteuer, d. h. 0,5 %, da die Aktualisierung von Subgraphen eine Onchain-Aktion ist, die Gas kostet. -### 4. How often can I update my subgraph? +### 4. Wie oft kann ich meinen Subgraphen aktualisieren? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +Es wird empfohlen, dass Sie Ihre Subgraphen nicht zu häufig aktualisieren. Sehen Sie die obige Frage für weitere Details. -### 5. Can I sell my curation shares? +### 5. Kann ich meine Kurationsanteile verkaufen? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed). +Kurationsanteile können nicht „gekauft“ oder „verkauft“ werden wie andere ERC20-Token, mit denen Sie vielleicht vertraut sind. Sie können nur geprägt (erstellt) oder verbrannt (vernichtet) werden. -As a Curator on Arbitrum, you are guaranteed to get back the GRT you initially deposited (minus the tax). +Als Kurator auf Arbitrum bekommen Sie garantiert die GRT zurück, die Sie ursprünglich eingezahlt haben (abzüglich der Steuer). -### 6. Am I eligible for a curation grant? +### 6. Bin ich für eine Kuratorenförderung berechtigt? -Curation grants are determined individually on a case-by-case basis. If you need assistance with curation, please send a request to support@thegraph.zendesk.com. +Zuschüsse für die Kuration werden von Fall zu Fall entschieden. Wenn Sie Unterstützung bei der Kuratierung benötigen, senden Sie bitte eine Anfrage an support@thegraph.zendesk.com. -Still confused? Check out our Curation video guide below: +Sind Sie immer noch verwirrt? 
Sehen Sie sich unten unseren Video-Leitfaden zur Kuration an: From c44967015d22010b145f0724b647fb407dd44e76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:52 -0500 Subject: [PATCH 0485/1534] New translations curating.mdx (Italian) --- website/src/pages/it/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/it/resources/roles/curating.mdx b/website/src/pages/it/resources/roles/curating.mdx index 38670a7d95db..330a80715730 100644 --- a/website/src/pages/it/resources/roles/curating.mdx +++ b/website/src/pages/it/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Subgraph Explorer](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## Come segnalare @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. Il mercato delle query è intrinsecamente giovane per The Graph e c'è il rischio che la vostra %APY possa essere inferiore a quella prevista a causa delle dinamiche di mercato nascenti. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Un subgraph può fallire a causa di un bug. Un subgraph fallito non matura commissioni della query. Di conseguenza, si dovrà attendere che lo sviluppatore risolva il bug e distribuisca una nuova versione. - Se siete iscritti alla versione più recente di un subgraph, le vostre quote di partecipazione migreranno automaticamente a quella nuova versione. Questo comporta una tassa di curation di 0,5%. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. Qual è il costo dell'aggiornamento di un subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 
0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. Con quale frequenza posso aggiornare il mio subgraph? From 99e38ac99c2c9145aae6065e2830054a063783eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:53 -0500 Subject: [PATCH 0486/1534] New translations curating.mdx (Japanese) --- website/src/pages/ja/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ja/resources/roles/curating.mdx b/website/src/pages/ja/resources/roles/curating.mdx index 8f0a5c3f2b1e..ff0ae8aced25 100644 --- a/website/src/pages/ja/resources/roles/curating.mdx +++ b/website/src/pages/ja/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![エクスプローラー サブグラフ](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## シグナルの出し方 @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. The Graph では、クエリ市場は本質的に歴史が浅く、初期の市場ダイナミクスのために、あなたの%APY が予想より低くなるリスクがあります。 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. サブグラフはバグで失敗することがあります。 失敗したサブグラフは、クエリフィーが発生しません。 結果的に、開発者がバグを修正して新しいバージョンを展開するまで待たなければならなくなります。 - サブグラフの最新バージョンに加入している場合、シェアはその新バージョンに自動移行します。 これには 0.5%のキュレーション税がかかります。 - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. サブグラフの更新にかかるコストはいくらですか? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 
0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. サブグラフはどれくらいの頻度で更新できますか? From 63c917c203623631bc26dff64fd66b332cd979fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:54 -0500 Subject: [PATCH 0487/1534] New translations curating.mdx (Korean) --- website/src/pages/ko/resources/roles/curating.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ko/resources/roles/curating.mdx b/website/src/pages/ko/resources/roles/curating.mdx index 95881613cc02..1cc05bb7b62f 100644 --- a/website/src/pages/ko/resources/roles/curating.mdx +++ b/website/src/pages/ko/resources/roles/curating.mdx @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. What’s the cost of updating a subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. 
Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. How often can I update my subgraph? From abd63e516f1918f0e80ea760ca862e74c08858de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:55 -0500 Subject: [PATCH 0488/1534] New translations curating.mdx (Dutch) --- website/src/pages/nl/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/nl/resources/roles/curating.mdx b/website/src/pages/nl/resources/roles/curating.mdx index 35ca8fffe48d..99c74778c9bd 100644 --- a/website/src/pages/nl/resources/roles/curating.mdx +++ b/website/src/pages/nl/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Verken Subgraphs](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## Hoe werkt het Signaleren @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. De querymarkt is nog jong bij het Graph Netwerk en er bestaat een risico dat je %APY lager kan zijn dan je verwacht door opkomende marktdynamiek. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Een subgraph kan stuk gaan door een bug. Een subgraph die stuk is gegenereerd geen querykosten. Als gevolg hiervan moet je wachten tot de ontwikkelaar de bug repareert en een nieuwe versie implementeert. - Als je bent geabonneerd op de nieuwste versie van een subgraph, worden je curatieaandelen automatisch gemigreerd naar die nieuwe versie. Er is een curatiebelasting van 0,5%. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### Wat zijn de kosten voor het updaten van een subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. 
When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### Hoe vaak kan ik mijn subgraph updaten? From a676691bc126bb52d579d8fc26f4dceacad1258c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:56 -0500 Subject: [PATCH 0489/1534] New translations curating.mdx (Polish) --- website/src/pages/pl/resources/roles/curating.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/pl/resources/roles/curating.mdx b/website/src/pages/pl/resources/roles/curating.mdx index 95881613cc02..1cc05bb7b62f 100644 --- a/website/src/pages/pl/resources/roles/curating.mdx +++ b/website/src/pages/pl/resources/roles/curating.mdx @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. What’s the cost of updating a subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 
0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. How often can I update my subgraph? From 66b839062e735400c48a27d25ca7c691fd783590 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:57 -0500 Subject: [PATCH 0490/1534] New translations curating.mdx (Portuguese) --- .../src/pages/pt/resources/roles/curating.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/pt/resources/roles/curating.mdx b/website/src/pages/pt/resources/roles/curating.mdx index 50a71973ff4c..582a7926b9ee 100644 --- a/website/src/pages/pt/resources/roles/curating.mdx +++ b/website/src/pages/pt/resources/roles/curating.mdx @@ -8,31 +8,31 @@ Curadores são importantes para a economia descentralizada do The Graph. Eles ut Antes que consumidores possam indexar um subgraph, ele deve ser indexado. É aqui que entra a curadoria. Para que Indexadores ganhem taxas de query substanciais em subgraphs de qualidade, eles devem saber quais subgraphs indexar. Quando Curadores sinalizam um subgraph, isto diz aos Indexadores que um subgraph está em demanda e tem qualidade suficiente para ser indexado. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Os Curadores trazem eficiência à Graph Network, e a [sinalização](#how-to-signal) é o processo que curadores usam para avisar aos Indexadores que um subgraph é bom para indexar. Os Indexadores podem confiar no sinal de um Curador, porque ao sinalizar, os Curadores mintam uma ação de curadoria para o subgraph, o que concede aos Curadores uma porção das futuras taxas de query movidas pelo subgraph. Sinais de curador são representados como tokens ERC20 chamados de Ações de Curadoria do Graph (GCS). Quem quiser ganhar mais taxas de query devem sinalizar o seu GRT a subgraphs que apostam que gerará um fluxo forte de taxas á rede. Curadores não podem ser cortados por mau comportamento, mas há uma taxa de depósito em Curadores para desincentivar más decisões que possam ferir a integridade da rede. Curadores também ganharão menos taxas de query se curarem um subgraph de baixa qualidade, já que haverão menos queries a processar ou menos Indexadores para processá-las. -O [Indexador de Atualização do Nascer do Sol](/archived/sunrise/#what-is-the-upgrade-indexer) garante a indexação de todos os subgraphs, e que sinalizar GRT em um subgraph em particular atrairá mais Indexadores a ele. Este incentivo de Indexadores adicionais via curadoria visa melhorar a qualidade do serviço de queries ao reduzir a latência e melhorar a disponibilidade da rede. +O [Indexador de Atualização do Nascer do Sol](/sunrise/#what-is-the-upgrade-indexer) garante a indexação de todos os subgraphs; sinalizar GRT em um subgraph específico atrairá mais Indexadores a ele. 
Este incentivo para Indexadores através da curadoria visa melhorar a qualidade do serviço de queries através da redução de latência e do aprimoramento da disponibilidade de rede. Ao sinalizar, Curadores podem decidir entre sinalizar numa versão específica do subgraph ou sinalizar com a automigração. Caso sinalizem com a automigração, as ações de um curador sempre serão atualizadas à versão mais recente publicada pelo programador. Se decidirem sinalizar numa versão específica, as ações sempre permanecerão nesta versão específica. -Se precisar de ajuda com a curadoria para melhorar a qualidade do serviço, mande um pedido à equipa da Edge & Node em support@thegraph.zendesk.com e especifique os subgraphs com que precisa de ajuda. +Se precisar de ajuda com a curadoria para melhorar a qualidade do serviço, peça ajuda à equipa da Edge Node em support@thegraph.zendesk.com e especifique os subgraphs com que precisa de assistência. Os indexadores podem achar subgraphs para indexar com base em sinais de curadoria que veem no Graph Explorer (imagem abaixo). -![Subgraphs no Explorer](/img/explorer-subgraphs.png) +![Subgraphs do Explorer](/img/explorer-subgraphs.png) ## Como Sinalizar -Dentro da aba Curator (Curador) no Graph Explorer, os curadores poderão sinalizar ou retirar os seus sinais em certos subgraphs com base nas estatísticas da rede. Para uma explicação passo a passo deste processo no Explorer, [clique aqui.](/subgraphs/explorer/) +Na aba "Curator" (Curador) do Graph Explorer, os curadores podem sinalizar e tirar sinal de certos subgraphs baseados nas estatísticas de rede. [Clique aqui](/subgraphs/explorer/) para um passo-a-passo deste processo no Graph Explorer. Um curador pode escolher sinalizar uma versão específica de subgraph, ou pode automaticamente migrar o seu sinal à versão mais recente desse subgraph. Ambas estratégias são válidas, e vêm com as suas próprias vantagens e desvantagens. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Sinalizar numa versão específica serve muito mais quando um subgraph é usado por vários dApps. Um dApp pode precisar atualizar o subgraph regularmente com novos recursos; outro dApp pode preferir usar uma versão mais antiga, porém melhor testada. Na curadoria inicial, é incorrida uma taxa de 1%. Ter um sinal que migra automaticamente à build mais recente de um subgraph pode ser bom para garantir o acúmulo de taxas de consulta. Toda vez que cura, é incorrida uma taxa de 1% de curadoria. Também pagará uma taxa de 0.5% em toda migração. É recomendado que rogramadores de subgraphs evitem editar novas versões com frequência - eles devem pagar uma taxa de curadoria de 0.5% em todas as ações de curadoria auto-migradas. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. 
+> \*\*Nota: O primeiro endereço a sinalizar um subgraph particular é considerado o primeiro curador e deverá realizar tarefas muito mais intensivas em gas do que o resto dos curadores seguintes — porque o primeiro curador inicializa os tokens de ação de curadoria, inicializa o bonding curve, e também transfere tokens no proxy do Graph. ## Como Sacar o Seu GRT @@ -47,8 +47,8 @@ Porém, é recomendado que curadores deixem o seu GRT no lugar, não apenas para ## Riscos 1. O mercado de consulta é jovem por natureza no The Graph, e há sempre o risco do seu rendimento anual ser menor que o esperado devido às dinâmicas nascentes do mercado. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +2. Taxa de Curadoria - Quando um curador sinaliza GRT em um subgraph, ele incorre uma taxa de curadoria de 1%, que é queimada. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Um subgraph pode falhar devido a um erro de código. Um subgraph falho não acumula taxas de consulta. Portanto, espere até o programador consertar o erro e lançar uma nova versão. - Caso se inscreva à versão mais recente de um subgraph, suas ações migrarão automaticamente a esta versão nova. Isto incorrerá uma taxa de curadoria de 0.5%. - Se sinalizou em um subgraph específico e ele falhou, deverá queimar as suas ações de curadoria manualmente. Será então possível sinalizar na nova versão do subgraph, o que incorre uma taxa de curadoria de 1%. @@ -61,10 +61,10 @@ Ao sinalizar em um subgraph, ganhará parte de todas as taxas de query geradas p ### 2. Como decidir quais subgraphs são de qualidade alta para sinalizar? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Achar subgraphs de alta qualidade é uma tarefa complexa, mas o processo pode ser abordado de várias formas diferentes. Como Curador, procure subgraphs confiáveis que movem volumes de query. Um subgraph confiável pode ser valioso se for completo, preciso, e apoiar as necessidades de dados de um dApp. 
Um subgraph mal arquitetado pode precisar de revisões ou reedições, além de correr risco de falhar. É importante que os Curadores verifiquem a arquitetura ou código de um subgraph, para averiguar se ele é valioso. Portanto: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Os curadores podem usar o seu conhecimento de uma rede para tentar adivinhar como um subgraph individual pode gerar um volume maior ou menor de queries no futuro +- Os curadores também devem entender as métricas disponíveis através do Graph Explorer. Métricas como o volume de queries passados e a identidade do programador do subgraph podem ajudar a determinar se um subgraph vale ou não o sinal. ### 3. Qual o custo de atualizar um subgraph? @@ -76,7 +76,7 @@ Não atualize os seus subgraphs com frequência excessiva. Veja a questão acima ### 5. Posso vender as minhas ações de curadoria? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed). +Ações de curadoria não podem ser "compradas" ou "vendidas" como outros tokens de ERC-20; apenas mintadas (criadas) ou queimadas (destruídas). Como um Curador no Arbitrum, é garantido que você receberá o GRT que depositou inicialmente (menos a taxa). From 7cce2248df1dea60230fe6e5aae434f6e033a3f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:58 -0500 Subject: [PATCH 0491/1534] New translations curating.mdx (Russian) --- website/src/pages/ru/resources/roles/curating.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ru/resources/roles/curating.mdx b/website/src/pages/ru/resources/roles/curating.mdx index 5665d642c18f..ef319cda705e 100644 --- a/website/src/pages/ru/resources/roles/curating.mdx +++ b/website/src/pages/ru/resources/roles/curating.mdx @@ -14,13 +14,13 @@ Curator signals are represented as ERC20 tokens called Graph Curation Shares (GC The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +При подаче сигнала Кураторы могут решить подать сигнал на определенную версию субграфа или использовать автомиграцию. Если они подают сигнал с помощью автомиграции, доли куратора всегда будут обновляться до последней версии, опубликованной разработчиком. Если же они решат подать сигнал на определенную версию, доли всегда будут оставаться на этой конкретной версии. 
If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Explorer по подграфам](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## Как подавать Сигнал @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. Рынок запросов в The Graph по своей сути молод, и существует риск того, что ваш %APY может оказаться ниже, чем вы ожидаете, из-за зарождающейся динамики рынка. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Подграф может выйти из строя из-за ошибки. За неудавшийся подграф не начисляется плата за запрос. В результате вам придется ждать, пока разработчик исправит ошибку и выложит новую версию. - Если вы подписаны на новейшую версию подграфа, ваши общие ресурсы автоматически перейдут на эту новую версию. При этом будет взиматься кураторская комиссия в размере 0,5%. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. Какова стоимость обновления подграфа? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. Как часто я могу обновлять свой подграф? 
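The curation-tax arithmetic restated in each of these translated pages (a 1% tax burned when GRT is signaled, plus a further 0.5% each time shares auto-migrate to a new subgraph version) can be sketched as a small worked example. This is a minimal illustration of the rates quoted in the docs, not actual contract code; the constants and function names below are hypothetical.

```ts
// Minimal sketch of the curation taxes described above, assuming the
// documented rates: 1% burned on signaling, 0.5% burned per auto-migration.
const CURATION_TAX = 0.01;      // 1% of deposited GRT is burned on signal
const AUTO_MIGRATE_TAX = 0.005; // 0.5% burned when shares auto-migrate

/** GRT that remains signaled after the 1% mint tax. */
function signal(depositGrt: number): number {
  return depositGrt * (1 - CURATION_TAX);
}

/** Signal remaining after shares auto-migrate to a new subgraph version. */
function autoMigrate(signaledGrt: number): number {
  return signaledGrt * (1 - AUTO_MIGRATE_TAX);
}

const signaled = signal(1_000);             // 990 GRT signaled, 10 GRT burned
const afterUpgrade = autoMigrate(signaled); // 985.05 GRT after one upgrade
console.log({ signaled, afterUpgrade });
```

Signaling manually on a new version instead of auto-migrating incurs the full 1% tax again, which is why these FAQs discourage publishing new subgraph versions too frequently.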
From cea656df481060c6e47e21a2876a8c8066a32b11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:52:59 -0500 Subject: [PATCH 0492/1534] New translations curating.mdx (Swedish) --- website/src/pages/sv/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/sv/resources/roles/curating.mdx b/website/src/pages/sv/resources/roles/curating.mdx index 66d272ff32df..fa6a279e5b1e 100644 --- a/website/src/pages/sv/resources/roles/curating.mdx +++ b/website/src/pages/sv/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Utforska subgrafer](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## Hur man Signaliserar @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. Frågemarknaden är i grunden ung på The Graph och det finns en risk att din %APY kan vara lägre än du förväntar dig på grund av tidiga marknadsmekanik. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. En subgraf kan misslyckas på grund av en bugg. En misslyckad subgraf genererar inte frågeavgifter. Som ett resultat måste du vänta tills utvecklaren rättar felet och distribuerar en ny version. - Om du prenumererar på den nyaste versionen av en subgraf kommer dina andelar automatiskt att migreras till den nya versionen. Detta kommer att medföra en kuratoravgift på 0,5%. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. Vad kostar det att uppdatera en subgraf? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. 
Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. Hur ofta kan jag uppdatera min subgraf? From a5f5cbe32a88db88ce7234c20f68a6c168ed70ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:00 -0500 Subject: [PATCH 0493/1534] New translations curating.mdx (Turkish) --- .../src/pages/tr/resources/roles/curating.mdx | 86 +++++++++---------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/website/src/pages/tr/resources/roles/curating.mdx b/website/src/pages/tr/resources/roles/curating.mdx index d99119e1166c..33d63ae0f0bb 100644 --- a/website/src/pages/tr/resources/roles/curating.mdx +++ b/website/src/pages/tr/resources/roles/curating.mdx @@ -1,89 +1,89 @@ --- -title: Curating +title: Kürasyon --- -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. +Küratörler The Graph'in merkeziyetsiz ekonomisi için kritik öneme sahiptir. Web3 ekosistemi hakkındaki bilgilerini kullanarak, The Graph Ağı tarafından endekslenmesi gereken subgraph’leri değerlendirir ve bunlara sinyal verirler. Küratörler Graph Gezgini aracılığıyla ağ verilerini inceleyerek sinyal verip vermeme kararını alır. The Graph Ağı, iyi kaliteye sahip subgraph’lere sinyal veren küratörleri, bu subgraph’lerin ürettiği sorgu ücretlerinden bir pay ile ödüllendirir. Sinyallenen GRT miktarı endeksleyiciler için hangi subgraph'leri endeksleyeceklerini belirlerken önemli bir faktördür. -## What Does Signaling Mean for The Graph Network? +## Sinyal Verme, The Graph Ağı için Ne Anlama Geliyor? -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. +Bir subgraph'in tüketiciler tarafından sorgulanabilmesi için subgraph önce endekslenmelidir. İşte burada kürasyon devreye girer. Endeksleyicilerin kaliteli subgraph’lerden kayda değer sorgu ücretleri kazanabilmesi için hangi subgraph’leri endeksleyeceklerini bilmeleri gerekir. Küratörler bir subgraph’e sinyal verdiğinde bu, endeksleyicilere o subgraph’in talep gördüğünü ve yeterli kaliteye sahip olduğunu gösterir. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Küratörler, The Graph ağını verimli hale getirirler. [Sinyalleme](#how-to-signal), Küratörlerin Endeksleyicilere hangi subgraph'in endekslenmeye uygun olduğunu bildirmelerini sağlayan süreçtir. 
Endeksleyiciler, bir Küratörden gelen sinyale güvenebilir çünkü sinyalleme sırasında, Küratörler subgraph için bir kürasyon payı üretir. Bu da onları subgraph'in sağladığı gelecekteki sorgu ücretlerinin bir kısmına hak sahibi kılar. -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. +Küratör sinyalleri, Graph Kürasyon Payları (Graph Curation Shares - GCS) olarak adlandırılan ERC20 token ile temsil edilir. Daha fazla sorgu ücreti kazanmak isteyenler, GRT’lerini ağ için güçlü bir ücret akışı yaratacağını öngördükleri subgraph’lere sinyal vermelidir. Küratörler kötü davranışları nedeniyle cezalandırılmaz (slashing uygulanmaz), ancak ağın bütünlüğüne zarar verebilecek kötü kararları caydırmak için bir depozito vergisi bulunur. Düşük kaliteli bir subgraph üzerinde kürasyon yapan Küratörler, daha az sorgu olduğu ya da daha az Endeksleyici tarafından işlendiği için daha az sorgu ücreti kazanır. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +[Sunrise Yükseltme Endeksleyici](/archived/sunrise/#what-is-the-upgrade-indexer) tüm subgraph'lerin endekslenmesini sağlar. Belirli bir subgraph'e GRT sinyallenmesi o subgraph'e daha fazla endeksleyici çeker. Kürasyon yoluyla ek Endeksleyicilerin teşvik edilmesi, sorgu hizmetinin kalitesini artırmayı amaçlar ve ağ erişilebilirliğini artırarak gecikmeyi azaltır. -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. +Sinyal verirken, Küratörler belirli bir subgraph sürümüne sinyal vermeyi veya otomatik geçiş (auto-migrate) özelliğini kullanmayı seçebilirler. Eğer otomatik geçiş özelliğini kullanarak sinyal verirlerse, bir küratörün payları her zaman geliştirici tarafından yayımlanan en son sürüme göre güncellenir. Bunun yerine belirli bir sürüme sinyal vermeyi seçerlerse, paylar her zaman bu belirli sürümdeki haliyle kalır. -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. +Hizmet kalitenizi artırmak için kürasyon konusunda yardıma ihtiyacınız varsa, lütfen Edge & Node ekibine support@thegraph.zendesk.com adresinden bir talep gönderin ve yardıma ihtiyacınız olan subgraph'leri belirtin. -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). 
+Endeksleyiciler, Graph Gezgini'nde gördükleri kürasyon sinyallerine dayanarak endeksleyecekleri subgraph’leri bulabilirler (aşağıdaki ekran görüntüsüne bakın). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Gezgin subgraph'leri](/img/explorer-subgraphs.png) -## How to Signal +## Nasıl Sinyal Verilir -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Graph Gezgini'ndeki Küratör sekmesi içinde, küratörler ağ istatistiklerine dayalı olarak belirli subgraph'lere sinyal verip kaldırabilecekler. Bunu Graph Gezgini'nde nasıl yapacağınıza dair adım adım bir genel bakış için, [buraya tıklayın.](/subgraphs/explorer/) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +Bir küratör, belirli bir subgraph sürümü üzerinde sinyal vermeyi seçebilir veya sinyalinin otomatik olarak o subgraph'in en yeni üretim sürümüne taşınmasını tercih edebilir. Her iki strateji de geçerli olup kendi avantaj ve dezavantajlarına sahiptir. -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Belirli bir sürüme sinyal vermek, özellikle bir subgraph birden fazla dapp tarafından kullanıldığında faydalıdır. Bir dapp, subgraph'ini yeni özelliklerle düzenli olarak güncellemek isteyebilir. Diğer bir dapp ise daha eski, iyi test edilmiş bir subgraph sürümünü kullanmayı tercih edebilir. İlk kürasyon sırasında, %1'lik standart bir vergi alınır. -Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. +Sinyalinizin otomatik olarak en yeni üretim sürümüne geçiş yapması, sorgu ücretlerini biriktirmeye devam etmenizi sağlamak açısından değerli olabilir. Her kürasyon yaptığınızda %1'lik bir kürasyon vergisi uygulanır. Ayrıca her geçişte %0,5'lik bir kürasyon vergisi ödersiniz. Subgraph geliştiricilerinin sık sık yeni sürümler yayımlaması teşvik edilmez - geliştiriciler otomatik olarak taşınan tüm kürasyon payları için %0,5 kürasyon vergisi ödemek zorundadırlar. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. +> **Not**: Belirli bir subgraph'e ilk kez sinyal veren adres ilk küratör olarak kabul edilir. Bu ilk sinyal işlemi, sonraki küratörlerinkine kıyasla çok daha fazla gaz tüketen bir işlemdir. Bunun nedeni, ilk küratörün kürasyon payı token'larını ilklendirmesi ve ayrıca token'ları The Graph proxy'sine aktarmasıdır. -## Withdrawing your GRT +## GRT'nizi Çekme -Curators have the option to withdraw their signaled GRT at any time. 
+Küratörler, sinyal verdikleri GRT'yi istedikleri zaman çekme seçeneğine sahiptir. -Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). +Yetkilendirme sürecinden farklı olarak, sinyal verdiğiniz GRT'yi çekmeye karar verirseniz bir bekleme süresiyle karşılaşmazsınız ve (%1 kürasyon vergisi düşüldükten sonra) toplam miktarı alırsınız. -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. +Bir küratör sinyalini çektikten sonra, endeksleyiciler aktif olarak sinyal verilmiş GRT olmasa bile subgraph'i endekslemeye devam etmeyi seçebilirler. -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. +Ancak, küratörlerin sinyal verdikleri GRT'yi yerinde bırakmaları tavsiye edilir; bu yalnızca sorgu ücretlerinden pay almak için değil, aynı zamanda subgraph'in güvenilirliğini ve kesintisiz çalışmasını sağlamak için de önemlidir. -## Risks +## Riskler -1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +1. The Graph üzerindeki sorgu pazarı henüz nispeten yenidir ve erken aşama piyasa dinamikleri nedeniyle %APY'nin beklediğinizden daha düşük olması riski mevcuttur. +2. Kürasyon Ücreti - Bir küratör bir subgraph'e GRT ile sinyal verdiğinde, %1'lik bir kürasyon vergisine tabi olur. Bu ücret yakılır. +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). +4. Bir subgraph, bir hata nedeniyle başarısız olabilir. Başarısız subgraph sorgu ücreti biriktirmez. 
Bu sebeple, geliştiricinin hatayı düzeltip yeni bir sürüm dağıtmasını beklemeniz gerekecektir. + - Eğer bir subgraph'in en yeni sürümüne aboneyseniz, paylarınız otomatik olarak o yeni sürüme geçecektir. Bu geçiş sırasında %0,5'lik bir kürasyon vergisi uygulanır. + - Belirli bir subgraph sürümüne sinyal verdiyseniz ve bu sürüm başarısız olduysa, kürasyon paylarınızı manuel olarak yakmanız gerekir. Daha sonra yeni subgraph sürümüne sinyal verebilirsiniz; bu işlem sırasında %1'lik bir kürasyon vergisi uygulanır. -## Curation FAQs +## Kürasyon Hakkında SSS -### 1. What % of query fees do Curators earn? +### 1. Küratörler, sorgu ücretlerinin yüzde kaçını kazanır? -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. +Bir subgraph'e sinyal vererek, subgraph'in ürettiği tüm sorgu ücretlerinden pay alırsınız. Tüm sorgu ücretlerinin %10'u, kürasyon paylarına orantılı olarak Küratörlere gider. Bu %10'luk oran yönetişime tabidir (yani yönetişim kararlarıyla değiştirilebilir). -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. Sinyal vereceğim subgraph'lerin hangilerinin yüksek kaliteli olduğunu nasıl belirlerim? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Yüksek kaliteli subgraph'leri bulmak karmaşık bir iştir. Ancak bu duruma farklı şekillerde yaklaşılabilir. Bir Küratör olarak, sorgu hacmi oluşturan güvenilir subgraph'ler aramak istersiniz. Güvenilir bir subgraph; tamamlanmış, doğru ve bir dapp’in veri ihtiyaçlarını destekliyorsa değerli olabilir. Kötü tasarlanmış bir subgraph'in revize edilmesi veya yeniden yayımlanması gerekebilir ve ileride hata alıp çalışmayı durdurabilir. Küratörler için bir subgraph'in değerli olup olmadığını değerlendirmek için subgraph'in mimarisini veya kodunu gözden geçirmesi önemlidir. Sonuç olarak: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Küratörler, bir ağ hakkındaki bilgilerini kullanarak, belirli bir subgraph'in gelecekte daha yüksek veya daha düşük sorgu hacmi oluşturma olasılığını tahmin etmeye çalışabilirler. +- Küratörler Graph Gezgini üzerinden erişilebilen metrikleri de anlamalıdır. Geçmiş sorgu hacmi ve subgraph geliştiricisinin kim olduğu gibi metrikler, bir subgraph'in sinyal vermeye değer olup olmadığını belirlemekte yardımcı olabilir. ### 3. Bir subgraph'ı güncellemenin maliyeti nedir? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 
0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Kürasyon paylarınızı yeni bir subgraph sürümüne taşımak %1'lik bir kürasyon vergisine tabidir. Küratörler, bir subgraph'in en yeni sürümüne abone olmayı tercih edebilir. Küratör payları otomatik olarak yeni bir sürüme taşındığında, Küratörler ayrıca kürasyon vergisinin yarısını (yani %0,5) öderler. Çünkü subgraph'lerin yükseltilmesi, zincir üzerinde gerçekleşen ve dolayısıyla gaz harcamayı gerektiren bir eylemdir. ### 4. Subgraph'ımı ne sıklıkla güncelleyebilirim? Subgraph'ınızı çok sık güncellememeniz önerilir. Daha fazla ayrıntı için yukarıdaki soruya bakın. -### 5. Can I sell my curation shares? +### 5. Kürasyon paylarımı satabilir miyim? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed). +Kürasyon payları, tanıdık olabileceğiniz diğer ERC20 token'ları gibi "satın alınamaz" veya "satılamaz." Sadece basılabilir (oluşturulabilir) veya yakılabilir (yok edilebilir). -As a Curator on Arbitrum, you are guaranteed to get back the GRT you initially deposited (minus the tax). +Arbitrum'da bir Küratör olarak başlangıçta yatırdığınız GRT'yi (vergi düşüldükten sonra) geri almanız garanti edilir. -### 6. Am I eligible for a curation grant? +### 6. Kürasyon hibesi için uygun muyum? -Curation grants are determined individually on a case-by-case basis. If you need assistance with curation, please send a request to support@thegraph.zendesk.com. +Kürasyon hibeleri, bireysel olarak vaka bazında değerlendirilir. Kürasyon konusunda yardıma ihtiyacınız varsa, lütfen support@thegraph.zendesk.com adresine bir talep gönderin. -Still confused? Check out our Curation video guide below: +Kafanız hala karışık mı? Aşağıdaki Kürasyon video rehberimizi inceleyin: From dffee45184c7535f668cdbde7d3bd3757b7e83dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:01 -0500 Subject: [PATCH 0494/1534] New translations curating.mdx (Ukrainian) --- website/src/pages/uk/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/uk/resources/roles/curating.mdx b/website/src/pages/uk/resources/roles/curating.mdx index 1004cb1566cc..4304c7c138df 100644 --- a/website/src/pages/uk/resources/roles/curating.mdx +++ b/website/src/pages/uk/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Дослідження підграфів](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## Як сигналізувати @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. Ринок запитів за своєю суттю молодий в Graph, і існує ризик того, що ваш %APY може бути нижчим, ніж ви очікуєте, через динаміку ринку, що зароджується. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. 
As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Підграф може не працювати через різноманітні помилки (баги). Підграф, що не працює не стягує комісію за запити. В результаті вам доведеться почекати, поки розробник виправить усі помилки й випустить нову версію. - Якщо ви підключені до найновішої версії підграфу, ваші частки будуть автоматично перенесені до цієї нової версії. При цьому буде стягуватися податок на в розмірі 0,5%. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. What’s the cost of updating a subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. How often can I update my subgraph? From 2e90f12d9ca694a470172881a212f0337d2a23ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:02 -0500 Subject: [PATCH 0495/1534] New translations curating.mdx (Chinese Simplified) --- website/src/pages/zh/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/zh/resources/roles/curating.mdx b/website/src/pages/zh/resources/roles/curating.mdx index 632d9926607f..54f4658473d7 100644 --- a/website/src/pages/zh/resources/roles/curating.mdx +++ b/website/src/pages/zh/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![浏览器子图](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## 如何进行信号处理 @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. 在Graph,查询市场本来就很年轻,由于市场动态刚刚开始,你的年收益率可能低于你的预期,这是有风险的。 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. 
Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. 一个子图可能由于错误而失败。 一个失败的子图不会累积查询费用。 因此,你必须等待,直到开发人员修复错误并部署一个新的版本。 - 如果你订阅了一个子图的最新版本,你的份额将自动迁移到该新版本。 这将产生 0.5%的策展税。 - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. 升级一个子图的成本是多少? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. 我可以多频繁的升级子图? From 63b9f0a5ad8f7faa4625eeedaabcb70fd9eb961c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:04 -0500 Subject: [PATCH 0496/1534] New translations curating.mdx (Urdu (Pakistan)) --- website/src/pages/ur/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ur/resources/roles/curating.mdx b/website/src/pages/ur/resources/roles/curating.mdx index 1def81a47814..9e972e55ab7f 100644 --- a/website/src/pages/ur/resources/roles/curating.mdx +++ b/website/src/pages/ur/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![سب گراف ایکسپلورر](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## سگنل کرنے کا طریقہ @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. گراف میں کیوری کی مارکیٹ فطری طور پر جوان ہے اور اس بات کا خطرہ ہے کہ آپ کا %APY مارکیٹ کی نئی حرکیات کی وجہ سے آپ کی توقع سے کم ہو سکتا ہے. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. 
Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. ایک سب گراف ایک بگ کی وجہ سے ناکام ہو سکتا ہے. ایک ناکام سب گراف کیوری کی فیس جمع نہیں کرتا ہے. اس کے نتیجے میں،آپ کو انتظار کرنا پڑے گاجب تک کہ ڈویلپر اس بگ کو کو ٹھیک نہیں کرتا اور نیا ورژن تعینات کرتا ہے. - اگر آپ نےسب گراف کے نۓ ورژن کو سبسکرائب کیا ہے. آپ کے حصص خود بخود اس نئے ورژن میں منتقل ہو جائیں گے۔ اس پر 0.5% کیوریشن ٹیکس لگے گا. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. سب گراف کو اپ ڈیٹ کرنے کی کیا قیمت ہے؟ -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. میں اپنے سب گراف کو کتنی بار اپ گریڈ کر سکتا ہوں؟ From 43278d4cfea5ae5a3c290614678558baec061b9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:05 -0500 Subject: [PATCH 0497/1534] New translations curating.mdx (Vietnamese) --- website/src/pages/vi/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/vi/resources/roles/curating.mdx b/website/src/pages/vi/resources/roles/curating.mdx index 3bb2134b36ac..e1633707faf3 100644 --- a/website/src/pages/vi/resources/roles/curating.mdx +++ b/website/src/pages/vi/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![Trình khám phá subgraph](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## Làm thế nào để phát tín hiệu @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. 
The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. Một subgraph có thể thất bại do một lỗi. Một subgraph thất bại không tích lũy phí truy vấn. Do đó, bạn sẽ phải đợi cho đến khi nhà phát triển sửa lỗi và triển khai phiên bản mới. - Nếu bạn đã đăng ký phiên bản mới nhất của một subgraph, các cổ phần của bạn sẽ tự động chuyển sang phiên bản mới đó. Điều này sẽ phát sinh một khoản thuế curation 0.5%. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. What’s the cost of updating a subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. How often can I update my subgraph? From 8c4bc88d5dcd620e90e110d81d010af2468c3d2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:06 -0500 Subject: [PATCH 0498/1534] New translations curating.mdx (Marathi) --- website/src/pages/mr/resources/roles/curating.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/mr/resources/roles/curating.mdx b/website/src/pages/mr/resources/roles/curating.mdx index d2e23cd0647b..2d504102644e 100644 --- a/website/src/pages/mr/resources/roles/curating.mdx +++ b/website/src/pages/mr/resources/roles/curating.mdx @@ -20,7 +20,7 @@ If you require assistance with curation to enhance the quality of service, pleas Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). 
-![एक्सप्लोरर सबग्राफ](/img/explorer-subgraphs.png) +![Explorer subgraphs](/img/explorer-subgraphs.png) ## सिग्नल कसे करावे @@ -48,7 +48,7 @@ However, it is recommended that curators leave their signaled GRT in place not o 1. द ग्राफमध्ये क्वेरी मार्केट मूळतः तरुण आहे आणि नवीन मार्केट डायनॅमिक्समुळे तुमचा %APY तुमच्या अपेक्षेपेक्षा कमी असण्याचा धोका आहे. 2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. बगमुळे सबग्राफ अयशस्वी होऊ शकतो. अयशस्वी सबग्राफ क्वेरी शुल्क जमा करत नाही. परिणामी, विकसक बगचे निराकरण करेपर्यंत आणि नवीन आवृत्ती तैनात करेपर्यंत तुम्हाला प्रतीक्षा करावी लागेल. - तुम्ही सबग्राफच्या नवीनतम आवृत्तीचे सदस्यत्व घेतले असल्यास, तुमचे शेअर्स त्या नवीन आवृत्तीमध्ये स्वयंचलितपणे स्थलांतरित होतील. यावर 0.5% क्युरेशन कर लागेल. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -68,7 +68,7 @@ Finding high-quality subgraphs is a complex task, but it can be approached in ma ### 3. What’s the cost of updating a subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an onchain action that costs gas. ### 4. How often can I update my subgraph? 
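The pro-rata fee split behind the first FAQ answer in these pages (10% of a subgraph's query fees go to its Curators, in proportion to their curation shares) can likewise be sketched. The 10% cut is the figure quoted in the docs and is subject to governance; the function name and the sample curators below are hypothetical.

```ts
// Hypothetical sketch of the query-fee split described in the FAQs:
// 10% of a subgraph's query fees go to Curators, pro-rata to their
// curation shares (GCS).
const CURATOR_FEE_CUT = 0.10;

function curatorPayouts(
  queryFeesGrt: number,
  shares: Record<string, number>, // curator address -> GCS balance
): Record<string, number> {
  const pool = queryFeesGrt * CURATOR_FEE_CUT;
  const totalShares = Object.values(shares).reduce((a, b) => a + b, 0);
  const payouts: Record<string, number> = {};
  for (const [curator, share] of Object.entries(shares)) {
    payouts[curator] = pool * (share / totalShares);
  }
  return payouts;
}

// Two curators holding 300 and 100 shares split 10% of 5,000 GRT in fees:
// the pool is 500 GRT, paid out as 375 GRT and 125 GRT respectively.
console.log(curatorPayouts(5_000, { curatorA: 300, curatorB: 100 }));
```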
From 55aaa84b857ce6d69e40da64ffc911424fd8fa76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:07 -0500 Subject: [PATCH 0499/1534] New translations curating.mdx (Hindi) --- .../src/pages/hi/resources/roles/curating.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/hi/resources/roles/curating.mdx b/website/src/pages/hi/resources/roles/curating.mdx index 2ef8fa055203..b8d3c5e006bf 100644 --- a/website/src/pages/hi/resources/roles/curating.mdx +++ b/website/src/pages/hi/resources/roles/curating.mdx @@ -8,31 +8,31 @@ Curators are critical to The Graph's decentralized economy. They use their knowl Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. +Curators The Graph network को कुशल बनाते हैं और [संकेत देना](#how-to-signal) वह प्रक्रिया है जिसका उपयोग Curators यह बताने के लिए करते हैं कि कौन सा subgraph Indexer के लिए अच्छा है। Indexers Curator से आने वाले संकेत पर भरोसा कर सकते हैं क्योंकि संकेत देना के दौरान, Curators subgraph के लिए एक curation share मिंट करते हैं, जो उन्हें उस subgraph द्वारा उत्पन्न भविष्य के पूछताछ शुल्क के एक हिस्से का हकदार बनाता है। Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. -The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. +[Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) यह सुनिश्चित करता है कि सभी सबग्राफ को index किया जाए। किसी विशेष subgraph पर GRT को संकेत करने से अधिक indexers उस पर आकर्षित होते हैं। curation के माध्यम से अतिरिक्त Indexers को प्रोत्साहित करना queries की सेवा की गुणवत्ता को बढ़ाने के लिए है, जिससे latency कम हो और नेटवर्क उपलब्धता में सुधार हो। When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. 
-यदि आपको सेवा की गुणवत्ता बढ़ाने के लिए क्यूरेशन में सहायता की आवश्यकता है, तो कृपया Edge & Node टीम को support@thegraph.zendesk.com पर एक अनुरोध भेजें और उन सबग्राफ़्स का उल्लेख करें जिनमें आपको सहायता चाहिए। +यदि आपको सेवा की गुणवत्ता बढ़ाने के लिए curation में सहायता की आवश्यकता हो, तो कृपया एज और नोड टीम को support@thegraph.zendesk.com पर अनुरोध भेजें और उन सबग्राफ को निर्दिष्ट करें जिनमें आपको सहायता चाहिए। Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). -![एक्सप्लोरर सबग्राफ](/img/explorer-subgraphs.png) +![Explorer सबग्राफ](/img/explorer-subgraphs.png) ## सिग्नल कैसे करें -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/subgraphs/explorer/) +Graph Explorer के Curator टैब में, curators नेटवर्क स्टैट्स के आधार पर कुछ सबग्राफ पर signal और unsignal कर सकेंगे। Graph Explorer में यह कैसे करना है, इसका चरण-दर-चरण अवलोकन पाने के लिए [यहाँ क्लिक करें](/subgraphs/explorer/)। एक क्यूरेटर एक विशिष्ट सबग्राफ संस्करण पर संकेत देना चुन सकता है, या वे अपने सिग्नल को स्वचालित रूप से उस सबग्राफ के नवीनतम उत्पादन निर्माण में माइग्रेट करना चुन सकते हैं। दोनों मान्य रणनीतियाँ हैं और अपने स्वयं के पेशेवरों और विपक्षों के साथ आती हैं। -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +विशेष संस्करण पर संकेत देना विशेष रूप से उपयोगी होता है जब एक subgraph को कई dapp द्वारा उपयोग किया जाता है। एक dapp को नियमित रूप से subgraph को नई विशेषता के साथ अपडेट करने की आवश्यकता हो सकती है। दूसरी dapp एक पुराना, अच्छी तरह से परीक्षण किया हुआ उपग्राफ subgraph संस्करण उपयोग करना पसंद कर सकती है। प्रारंभिक क्यूरेशन curation पर, 1% मानक कर tax लिया जाता है। अपने सिग्नल को स्वचालित रूप से नवीनतम उत्पादन बिल्ड में माइग्रेट करना यह सुनिश्चित करने के लिए मूल्यवान हो सकता है कि आप क्वेरी शुल्क अर्जित करते रहें। हर बार जब आप क्यूरेट करते हैं, तो 1% क्यूरेशन टैक्स लगता है। आप हर माइग्रेशन पर 0.5% क्यूरेशन टैक्स भी देंगे। सबग्राफ डेवलपर्स को बार-बार नए संस्करण प्रकाशित करने से हतोत्साहित किया जाता है - उन्हें सभी ऑटो-माइग्रेटेड क्यूरेशन शेयरों पर 0.5% क्यूरेशन टैक्स देना पड़ता है। -> **Note**: किसी विशेष subgraph को संकेत देने वाला पहला पता पहला curator माना जाता है और उसे बाकी के curators की तुलना में बहुत अधिक gas-intensive काम करना होगा क्योंकि पहला curators curation share tokens को initializes करता है, bonding curve को initializes करता है, और Graph proxy में tokens भी transfer करता है। +> **नोट**पहला पता जो किसी विशेष subgraph को सिग्नल करता है, उसे पहला curator माना जाएगा और उसे बाकी आने वाले curators की तुलना में अधिक गैस-इंटेंसिव कार्य करना होगा क्योंकि पहला curator curation share टोकन को इनिशियलाइज़ करता है और टोकन को The Graph प्रॉक्सी में ट्रांसफर करता है। ## Withdrawing your GRT @@ -47,8 +47,8 @@ However, it is recommended that curators leave their signaled GRT in place not o ## जोखिम 1. क्वेरी बाजार द ग्राफ में स्वाभाविक रूप से युवा है और इसमें जोखिम है कि नवजात बाजार की गतिशीलता के कारण आपका %APY आपकी अपेक्षा से कम हो सकता है। -2. क्यूरेशन शुल्क - जब कोई क्यूरेटर किसी सबग्राफ़ पर GRT सिग्नल करता है, तो उसे 1% क्यूरेशन टैक्स देना होता है। यह शुल्क जला दिया जाता है। -3. 
(Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/). +2. क्यूरेशन शुल्क - जब कोई क्यूरेटर किसी सबग्राफ़ पर GRT सिग्नल करता है, तो उसे 1% क्यूरेशन टैक्स देना होता है। यह शुल्क जला दिया जाता है। +3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/resources/roles/delegating/delegating/). 4. बग के कारण सबग्राफ विफल हो सकता है। एक विफल सबग्राफ क्वेरी शुल्क अर्जित नहीं करता है। नतीजतन, आपको तब तक इंतजार करना होगा जब तक कि डेवलपर बग को ठीक नहीं करता है और एक नया संस्करण तैनात करता है। - यदि आपने सबग्राफ के नवीनतम संस्करण की सदस्यता ली है, तो आपके शेयर उस नए संस्करण में स्वत: माइग्रेट हो जाएंगे। इस पर 0.5% क्यूरेशन टैक्स लगेगा। - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. @@ -61,14 +61,14 @@ By signalling on a subgraph, you will earn a share of all the query fees that th ### 2. मैं यह कैसे तय करूं कि कौन से सबग्राफ सिग्नल देने के लिए उच्च गुणवत्ता वाले हैं? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +उच्च-गुणवत्ता वाले सबग्राफ खोजना एक जटिल कार्य है, लेकिन इसे कई अलग-अलग तरीकों से किया जा सकता है। एक Curator के रूप में, आपको उन भरोसेमंद सबग्राफ को देखना चाहिए जो query volume को बढ़ा रहे हैं। एक भरोसेमंद subgraph मूल्यवान हो सकता है यदि वह पूर्ण, सटीक हो और किसी dapp की डेटा आवश्यकताओं को पूरा करता हो। एक खराब डिज़ाइन किया गया subgraph संशोधित या पुनः प्रकाशित करने की आवश्यकता हो सकती है और अंततः असफल भी हो सकता है। यह Curators के लिए अत्यंत महत्वपूर्ण है कि वे किसी subgraph की संरचना या कोड की समीक्षा करें ताकि यह आकलन कर सकें कि subgraph मूल्यवान है या नहीं। - क्यूरेटर नेटवर्क की अपनी समझ का उपयोग करके यह अनुमान लगाने की कोशिश कर सकते हैं कि भविष्य में कोई विशेष सबग्राफ़ अधिक या कम क्वेरी वॉल्यूम कैसे उत्पन्न कर सकता है। - क्यूरेटर को Graph Explorer के माध्यम से उपलब्ध मेट्रिक्स को भी समझना चाहिए। जैसे कि पिछले क्वेरी वॉल्यूम और सबग्राफ़ डेवलपर कौन है, ये मेट्रिक्स यह तय करने में मदद कर सकते हैं कि किसी सबग्राफ़ पर सिग्नलिंग करना उचित है या नहीं। ### 3. What’s the cost of updating a subgraph? 
-Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. +नए subgraph संस्करण में अपनी curation shares को माइग्रेट करने पर 1% curation टैक्स लगता है। Curators नए subgraph संस्करण को सब्सक्राइब करने का विकल्प चुन सकते हैं। जब curator shares अपने आप नए संस्करण में माइग्रेट होती हैं, तो Curators को आधा curation टैक्स, यानी 0.5%, देना पड़ता है क्योंकि सबग्राफ को अपग्रेड करना एक ऑनचेन क्रिया है जो गैस खर्च करती है। ### 4. How often can I update my subgraph? From 6347d5d9bf44c7091a07f8258fc8b660be471104 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:08 -0500 Subject: [PATCH 0500/1534] New translations tokenomics.mdx (Romanian) --- website/src/pages/ro/resources/tokenomics.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ro/resources/tokenomics.mdx b/website/src/pages/ro/resources/tokenomics.mdx index 73a1adda922b..4a9b42ca6e0d 100644 --- a/website/src/pages/ro/resources/tokenomics.mdx +++ b/website/src/pages/ro/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics of The Graph Network +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -21,13 +22,13 @@ The Graph plays a vital role in making blockchain data more accessible and suppo There are four primary network participants: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query subgraphs -4. Indexers - Backbone of blockchain data +4. Indexers - Backbone of blockchain data Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. 
These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 0e5a03e2d6c8821d01b2bc041415a24c4e524aa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:09 -0500 Subject: [PATCH 0501/1534] New translations tokenomics.mdx (French) --- website/src/pages/fr/resources/tokenomics.mdx | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/website/src/pages/fr/resources/tokenomics.mdx b/website/src/pages/fr/resources/tokenomics.mdx index 96a7cd4d787a..27bbbee1af4d 100644 --- a/website/src/pages/fr/resources/tokenomics.mdx +++ b/website/src/pages/fr/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Les tokenomiques du réseau The Graph +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- Adresse du jeton GRT sur Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) -## The Roles of Network Participants +## Les rôles des participants au réseau There are four primary network participants: -1. Délégateurs - Déléguer les GRT aux indexeurs & sécuriser le réseau +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curateurs - Trouver les meilleurs subgraphs pour les indexeurs +2. Curateurs - Trouver les meilleurs subgraphs pour les indexeurs -3. Développeurs - Créer & interroger des subgraphs +3. Developers - Build & query subgraphs -4. Indexeurs - épine dorsale des données de la blockchain +4. Indexeurs - épine dorsale des données de la blockchain Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -43,7 +44,7 @@ There is a 0.5% delegation tax which is burned whenever a Delegator delegates GR If you're reading this, you're capable of becoming a Delegator right now by heading to the [network participants page](https://thegraph.com/explorer/participants/indexers), and delegating GRT to an Indexer of your choice. -## Curators (Earn GRT) +## Curateurs (Gagnez des GRT) Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. 
@@ -73,11 +74,11 @@ Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and th Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. -Indexers can earn GRT rewards in two ways: +Les Indexeurs peuvent gagner des récompenses en GRT de deux façons : -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. @@ -87,7 +88,7 @@ Indexers can increase their GRT allocations on subgraphs by accepting GRT delega The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors. -## Token Supply: Burning & Issuance +## Token Supply : Incinération & Emission The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. @@ -97,6 +98,6 @@ The Graph is designed with multiple burning mechanisms to offset new token issua In addition to these regularly occurring burning activities, the GRT token also has a slashing mechanism in place to penalize malicious or irresponsible behavior by Indexers. If an Indexer is slashed, 50% of their indexing rewards for the epoch are burned (while the other half goes to the fisherman), and their self-stake is slashed by 2.5%, with half of this amount being burned. This helps to ensure that Indexers have a strong incentive to act in the best interests of the network and to contribute to its security and stability. -## Improving the Protocol +## Amélioration du protocole The Graph Network is ever-evolving and improvements to the economic design of the protocol are constantly being made to provide the best experience for all network participants. The Graph Council oversees protocol changes and community members are encouraged to participate. Get involved with protocol improvements in [The Graph Forum](https://forum.thegraph.com/). 
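The tokenomics.mdx patches in this series repeat one worked example: delegating 15,000 GRT to an Indexer offering 10% yields roughly 1,500 GRT per year, with a 0.5% delegation tax burned up front and a 28-epoch unbonding period (6,646 blocks per epoch, about 26 days) on withdrawal. A small TypeScript sketch of that arithmetic follows, using illustrative names only; it assumes the quoted percentage is applied to the delegated stake per year.

```typescript
// Back-of-the-envelope check of the delegation example in tokenomics.mdx.
// Constants are from the docs text; names are illustrative, not an official API.

const DELEGATION_TAX = 0.005; // burned whenever GRT is delegated
const UNBONDING_EPOCHS = 28; // withdrawal wait
const BLOCKS_PER_EPOCH = 6_646; // 28 epochs is roughly 26 days

/** GRT that actually reaches the Indexer after the 0.5% burn. */
function delegatedAfterTax(grt: number): number {
  return grt * (1 - DELEGATION_TAX);
}

/** Yearly rewards at the rate the Indexer advertises. */
function yearlyRewards(delegatedGrt: number, indexerRate: number): number {
  return delegatedGrt * indexerRate;
}

const stake = delegatedAfterTax(15_000); // 14,925 GRT after the burn
console.log(yearlyRewards(stake, 0.1)); // ≈ 1,492.5 GRT/year; the docs quote ~1,500
```

The small gap between 1,492.5 and the docs' "~1,500 GRT" comes from the docs quoting the figure on the pre-tax amount; the sketch applies the 0.5% burn first.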
From 8d30113d6e46285f287c6875f620b49f7ae509ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:10 -0500 Subject: [PATCH 0502/1534] New translations tokenomics.mdx (Spanish) --- website/src/pages/es/resources/tokenomics.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/es/resources/tokenomics.mdx b/website/src/pages/es/resources/tokenomics.mdx index 6b03a8e4d03d..cd30274637ea 100644 --- a/website/src/pages/es/resources/tokenomics.mdx +++ b/website/src/pages/es/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics de The Graph Network +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- Dirección del token GRT en Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. Delegadores - Delegan GRT a los Indexadores y aseguran la red +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curadores - Encuentran los mejores subgrafos para los Indexadores +2. Curadores - Encuentran los mejores subgrafos para los Indexadores -3. Desarrolladores - Construyen y consultan subgrafos +3. Developers - Build & query subgraphs -4. Indexadores: Son la columna vertebral de los datos de la blockchain +4. Indexadores: Son la columna vertebral de los datos de la blockchain Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. 
These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From a7457a1e9d434dc701bd9c606bf8d808c7cafdc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:11 -0500 Subject: [PATCH 0503/1534] New translations tokenomics.mdx (Arabic) --- website/src/pages/ar/resources/tokenomics.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ar/resources/tokenomics.mdx b/website/src/pages/ar/resources/tokenomics.mdx index 1afdc3ccf913..511af057534f 100644 --- a/website/src/pages/ar/resources/tokenomics.mdx +++ b/website/src/pages/ar/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: اقتصاد التوكن (Tokenomics) لشبكة الغراف +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- عنوان توكن GRT على Arbitrum One: [ 0x9623063377AD1B27544C965cCd7342f7EA7e88C7 ](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. المفوضين (Delegators) - يقومو بتفويض GRT للمفهرسين & تأمين الشبكة +1. Delegators - Delegate GRT to Indexers & secure the network -2. المنسقون (Curators) - يبحثون عن أفضل subgraphs للمفهرسين +2. المنسقون (Curators) - يبحثون عن أفضل subgraphs للمفهرسين -3. المطورون - بناء& ال subgraphs للاستعلام +3. Developers - Build & query subgraphs -4. المفهرسون (Indexers) - العمود الفقري لبيانات blockchain +4. المفهرسون (Indexers) - العمود الفقري لبيانات blockchain Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. 
These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From f8768e0ffeca58f0b4b51c2dc19106d5502b1da0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:12 -0500 Subject: [PATCH 0504/1534] New translations tokenomics.mdx (Czech) --- website/src/pages/cs/resources/tokenomics.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/cs/resources/tokenomics.mdx b/website/src/pages/cs/resources/tokenomics.mdx index 6937dd409631..92b1514574b4 100644 --- a/website/src/pages/cs/resources/tokenomics.mdx +++ b/website/src/pages/cs/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics sítě grafů +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- Adresa tokenu GRT na Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. Delegáti - delegování GRT na indexátory & zabezpečení sítě +1. Delegators - Delegate GRT to Indexers & secure the network -2. Kurátoři - nalezení nejlepších podgrafů pro indexátory +2. Kurátoři - nalezení nejlepších podgrafů pro indexátory -3. Vývojáři - Sestavení & dotazování podgrafů +3. Developers - Build & query subgraphs -4. Indexery - páteř blockchainových dat +4. Indexery - páteř blockchainových dat Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. 
Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 33b856c11d9c7666c1f39aa95635d20a3bc5133c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:13 -0500 Subject: [PATCH 0505/1534] New translations tokenomics.mdx (German) --- website/src/pages/de/resources/tokenomics.mdx | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/website/src/pages/de/resources/tokenomics.mdx b/website/src/pages/de/resources/tokenomics.mdx index 4041e1223131..3dd13eb7d06a 100644 --- a/website/src/pages/de/resources/tokenomics.mdx +++ b/website/src/pages/de/resources/tokenomics.mdx @@ -1,9 +1,10 @@ --- title: Tokenomics des The Graph Netzwerks +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- -## Overview +## Überblick The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- GRT-Token-Adresse auf Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. Delegatoren - Delegieren Sie GRT an Indexer & sichern Sie das Netzwerk +1. Delegators - Delegate GRT to Indexers & secure the network -2. Kuratoren - Finden Sie die besten Untergraphen für Indexer +2. Kuratoren - Finden Sie die besten Untergraphen für Indexer -3. Entwickler - Erstellen & Abfragen von Untergraphen +3. Developers - Build & query subgraphs -4. Indexer - Das Rückgrat der Blockchain-Daten +4. Indexer - Das Rückgrat der Blockchain-Daten Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. 
For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 4b64fbec2407b3344526350caa95157691ba8731 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:14 -0500 Subject: [PATCH 0506/1534] New translations tokenomics.mdx (Italian) --- website/src/pages/it/resources/tokenomics.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/it/resources/tokenomics.mdx b/website/src/pages/it/resources/tokenomics.mdx index 701baf8b4d51..c342b803f911 100644 --- a/website/src/pages/it/resources/tokenomics.mdx +++ b/website/src/pages/it/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics di The Graph Network +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- Indirizzo del token GRT su Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. Delegator - Delegare il GRT agli Indexer e proteggere la rete +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curator - Trovare i migliori subgraph per gli Indexer +2. Curator - Trovare i migliori subgraph per gli Indexer -3. Sviluppatori - Costruire ed eseguire query del subgraph +3. Developers - Build & query subgraphs -4. Indexer - Struttura portante dei dati della blockchain +4. 
Indexer - Struttura portante dei dati della blockchain Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From f1ff0b4e5f1efca15a99f585146ba828ce11d7ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:15 -0500 Subject: [PATCH 0507/1534] New translations tokenomics.mdx (Japanese) --- website/src/pages/ja/resources/tokenomics.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ja/resources/tokenomics.mdx b/website/src/pages/ja/resources/tokenomics.mdx index 6d98f8813d6e..07a04a43b06c 100644 --- a/website/src/pages/ja/resources/tokenomics.mdx +++ b/website/src/pages/ja/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: グラフネットワークのトークノミクス +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -21,13 +22,13 @@ The Graph plays a vital role in making blockchain data more accessible and suppo There are four primary network participants: -1. デリゲーター - GRTをインデクサーに委任する & ネットワークを確保する +1. Delegators - Delegate GRT to Indexers & secure the network -2. キュレーター - インデクサーのために最適なサブグラフを見つける。 +2. キュレーター - インデクサーのために最適なサブグラフを見つける。 -3. 開発者 - ビルド& サブグラフのクエリ +3. Developers - Build & query subgraphs -4. インデクサー - ブロックチェーンデータのバックボーン +4. インデクサー - ブロックチェーンデータのバックボーン Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. 
They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From ed5792ddf94aecdfc52d483cefac4c4da5de42f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:16 -0500 Subject: [PATCH 0508/1534] New translations tokenomics.mdx (Korean) --- website/src/pages/ko/resources/tokenomics.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/ko/resources/tokenomics.mdx b/website/src/pages/ko/resources/tokenomics.mdx index 73a1adda922b..4a9b42ca6e0d 100644 --- a/website/src/pages/ko/resources/tokenomics.mdx +++ b/website/src/pages/ko/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics of The Graph Network +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -21,13 +22,13 @@ The Graph plays a vital role in making blockchain data more accessible and suppo There are four primary network participants: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query subgraphs -4. Indexers - Backbone of blockchain data +4. Indexers - Backbone of blockchain data Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. 
**Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 0bb30134db36a84064dbbf4d5105d394386708ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:17 -0500 Subject: [PATCH 0509/1534] New translations tokenomics.mdx (Dutch) --- website/src/pages/nl/resources/tokenomics.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/nl/resources/tokenomics.mdx b/website/src/pages/nl/resources/tokenomics.mdx index 73a1adda922b..4a9b42ca6e0d 100644 --- a/website/src/pages/nl/resources/tokenomics.mdx +++ b/website/src/pages/nl/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics of The Graph Network +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -21,13 +22,13 @@ The Graph plays a vital role in making blockchain data more accessible and suppo There are four primary network participants: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query subgraphs -4. Indexers - Backbone of blockchain data +4. Indexers - Backbone of blockchain data Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. 
**Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From dc09d631c19bc246c2e643a23f50aa389579dc6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:18 -0500 Subject: [PATCH 0510/1534] New translations tokenomics.mdx (Polish) --- website/src/pages/pl/resources/tokenomics.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/pl/resources/tokenomics.mdx b/website/src/pages/pl/resources/tokenomics.mdx index 73a1adda922b..4a9b42ca6e0d 100644 --- a/website/src/pages/pl/resources/tokenomics.mdx +++ b/website/src/pages/pl/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics of The Graph Network +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -21,13 +22,13 @@ The Graph plays a vital role in making blockchain data more accessible and suppo There are four primary network participants: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query subgraphs -4. Indexers - Backbone of blockchain data +4. Indexers - Backbone of blockchain data Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. 
**Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 786f56dd351df08b937140086c09ed0bf59b6729 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:19 -0500 Subject: [PATCH 0511/1534] New translations tokenomics.mdx (Portuguese) --- website/src/pages/pt/resources/tokenomics.mdx | 91 ++++++++++--------- 1 file changed, 46 insertions(+), 45 deletions(-) diff --git a/website/src/pages/pt/resources/tokenomics.mdx b/website/src/pages/pt/resources/tokenomics.mdx index 088dc04cf3a7..f5994ac88795 100644 --- a/website/src/pages/pt/resources/tokenomics.mdx +++ b/website/src/pages/pt/resources/tokenomics.mdx @@ -1,102 +1,103 @@ --- title: Tokenomia da Graph Network -description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. +sidebarTitle: Tokenomics +description: A Graph Network é incentivada por uma tokenomia (economia de token) poderosa. Veja como funciona o GRT, o token nativo de utilidades de trabalho no The Graph. --- ## Visão geral -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +O The Graph é um protocolo descentralizado que permite acesso fácil a dados de blockchain. Ele indexa dados de blockchain da mesma forma que o Google indexa a web; se já usou um dApp (aplicativo descentralizado) que resgata dados de um subgraph, você provavelmente já interagiu com o The Graph. Hoje, milhares de [dApps populares](https://thegraph.com/explorer) no ecossistema da Web3 usam o The Graph. ## Especificações -The Graph's model is akin to a B2B2C model, but it's driven by a decentralized network where participants collaborate to provide data to end users in exchange for GRT rewards. GRT is the utility token for The Graph. It coordinates and incentivizes the interaction between data providers and consumers within the network. +O modelo do The Graph é parecido com um modelo B2B2C, mas é movido por uma rede descentralizada onde os participantes colaboram para fornecer dados a utilizadores finais em troca de recompensas em GRT. Ele coordena e incentiva a interação entre provedores de dados e consumidores dentro da rede. -The Graph plays a vital role in making blockchain data more accessible and supports a marketplace for its exchange. To learn more about The Graph's pay-for-what-you-need model, check out its [free and growth plans](/subgraphs/billing/). +O The Graph tem um papel principal na acessibilidade de dados de blockchain, e apoia um mercado para o seu comércio. Para saber mais sobre o modelo de "pague pelo necessário" do The Graph, [clique aqui](/subgraphs/billing/). 
- Endereço do Token GRT na Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- Endereço do Token GRT no Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- Endereço do Token GRT na Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) -## The Roles of Network Participants +## Os Papeis dos Participantes na Rede -There are four primary network participants: +Há quatro participantes primários na rede: -1. Delegantes — Delegam GRT aos Indexers & protegem a rede +1. Delegantes — Delegam GRT aos Indexadores e protegem a rede -2. Curadores — Encontram os melhores subgraphs para Indexadores +2. Curadores — Encontram os melhores subgraphs para Indexadores -3. Programadores — Constroem & consultam subgraphs em queries +3. Programadores — Constroem e consultam subgraphs em queries -4. Indexadores — Rede de transporte de dados em blockchain +4. Indexadores — Rede de transporte de dados em blockchain -Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). +Pescadores e Árbitros também são integrais ao êxito da rede através de outras contribuições, com o seu apoio ao trabalho dos outros papéis participantes primários. Para saber mais sobre papéis na rede, [leia este artigo](https://thegraph.com/blog/the-graph-grt-token-economics/). -![Tokenomics diagram](/img/updated-tokenomics-image.png) +![Diagrama de tokenomia](/img/updated-tokenomics-image.png) -## Delegators (Passively earn GRT) +## Delegantes (Ganham GRT passivamente) -Indexers are delegated GRT by Delegators, increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Os Delegantes delegam GRT a Indexadores, aumentando o stake do Indexador em subgraphs na rede. Em troca, os Delegantes ganham uma porcentagem de todas as taxas de query e recompensas de indexação do Indexador. Cada Indexador determina a porção que será recompensada aos Delegantes de forma independente, criando competição entre Indexadores para atrair Delegantes. Muitos Indexadores oferecem entre 9 e 12% ao ano. -For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1,500 GRT in rewards annually. +Por exemplo, se um Delegante delegasse 15.000 GRT a um Indexador que oferecesse 10%, o Delegante receberia cerca de 1.500 GRT em recompensas por ano. -There is a 0.5% delegation tax which is burned whenever a Delegator delegates GRT on the network. If a Delegator chooses to withdraw their delegated GRT, the Delegator must wait for the 28-epoch unbonding period. Each epoch is 6,646 blocks, which means 28 epochs ends up being approximately 26 days. +Há uma taxa de 0.5% de delegação, queimada quando um Delegante delega GRT na rede. Se um Delegante escolher sacar o GRT que delegou, o Delegante deve esperar o período de desligamento de 28 epochs. 
Cada epoch dura 6.646 blocos, o que significa que 28 epochs duram, em média, 26 dias.

-If you're reading this, you're capable of becoming a Delegator right now by heading to the [network participants page](https://thegraph.com/explorer/participants/indexers), and delegating GRT to an Indexer of your choice.
+Quem ler isto pode tornar-se um Delegante agora mesmo na [página de participantes da rede](https://thegraph.com/explorer/participants/indexers), e começar a delegar GRT a um Indexador da sua escolha.

-## Curators (Earn GRT)
+## Curadores (Ganham GRT)

-Curators identify high-quality subgraphs and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed.
+Os Curadores identificam subgraphs de alta qualidade e os "curam" (por ex., sinalizam GRT neles) para ganhar ações de curadoria, que garantem uma porção de todas as taxas de query futuras geradas pelo subgraph. Enquanto qualquer participante independente da rede pode ser um Curador, os programadores de subgraphs tendem a ser os primeiros Curadores dos seus próprios subgraphs, pois querem garantir que o seu subgraph seja indexado.

-Subgraph developers are encouraged to curate their subgraph with at least 3,000 GRT. However, this number may be impacted by network activity and community participation.
+Desde 11 de abril de 2024, os programadores de subgraphs podem curar o seu subgraph com, no mínimo, 3.000 GRT. Porém, este número pode ser impactado pela atividade na rede e participação na comunidade.

-Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT.
+Os Curadores pagam uma taxa de curadoria de 1% ao curar um subgraph novo. Esta taxa de curadoria é queimada, de modo a reduzir a reserva de GRT.

-## Developers
+## Programadores

-Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants.
+Os programadores constroem e fazem queries em subgraphs para retirar dados da blockchain. Como os subgraphs têm o código aberto, os programadores podem carregar dados da blockchain em seus dApps com queries nos subgraphs existentes. Os programadores pagam por queries feitos em GRT, que é distribuído aos participantes da rede.

### Como criar um Subgraph

-Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers.
+Para indexar dados na blockchain, os programadores podem [criar um subgraph](/developing/creating-a-subgraph/) — um conjunto de instruções para Indexadores sobre quais dados devem ser servidos aos consumidores.

-Once developers have built and tested their subgraph, they can [publish their subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/) on The Graph's decentralized network.
+Depois que os programadores tiverem criado e testado o seu subgraph, eles poderão [editá-lo](/subgraphs/developing/publishing/publishing-a-subgraph/) na rede descentralizada do The Graph.
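> Editor's aside: the Delegator figures carried by this hunk and its siblings (a 15,000 GRT delegation at a 10% reward cut, the 0.5% delegation tax, and a 28-epoch unbonding period of 6,646 blocks per epoch) are easy to sanity-check. A minimal sketch follows, assuming an average block time of about 12 seconds; that block time is an assumption, not something these docs state.

```typescript
// Sanity check of the Delegator numbers quoted in the docs above.
// ASSUMPTION: ~12-second average block time; the docs only state
// 6,646 blocks per epoch and "approximately 26 days" for 28 epochs.

const delegatedGrt = 15_000;   // example delegation from the docs
const indexerRewardCut = 0.1;  // an Indexer offering 10% to Delegators
const delegationTax = 0.005;   // 0.5% burned when delegating

const annualRewardGrt = delegatedGrt * indexerRewardCut; // 1,500 GRT/year
const burnedOnDelegate = delegatedGrt * delegationTax;   // 75 GRT burned

const blocksPerEpoch = 6_646;
const unbondingEpochs = 28;
const assumedBlockSeconds = 12;
const unbondingDays =
  (blocksPerEpoch * unbondingEpochs * assumedBlockSeconds) / 86_400;

console.log(annualRewardGrt, burnedOnDelegate, unbondingDays.toFixed(1)); // 1500 75 "25.8"
```

The ~25.8-day result is consistent with the "approximately 26 days" quoted in every language version of this page.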
### Como fazer queries em um Subgraph existente

-Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph.
+Depois que um subgraph for [editado](/subgraphs/developing/publishing/publishing-a-subgraph/) na rede descentralizada do The Graph, qualquer um poderá criar uma chave API, depositar GRT no seu saldo de cobrança, e consultar o subgraph em um query.

-Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol.
+Os Subgraphs [recebem queries pelo GraphQL](/subgraphs/querying/introduction/), e as taxas de query são pagas em GRT no [Subgraph Studio](https://thegraph.com/studio/). As taxas de query são distribuídas a participantes da rede com base nas suas contribuições ao protocolo.

-1% of the query fees paid to the network are burned.
+1% das taxas de query pagas à rede é queimado.

-## Indexers (Earn GRT)
+## Indexadores (Ganham GRT)

-Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs.
+Os Indexadores são o núcleo do The Graph: operam o equipamento e o software independentes que movem a rede descentralizada do The Graph. Eles servem dados a consumidores baseado em instruções de subgraphs.

-Indexers can earn GRT rewards in two ways:
+Os Indexadores podem ganhar recompensas em GRT de duas maneiras:

-1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)).
+1. **Taxas de query**: GRT pago, por programadores ou utilizadores, para queries de dados de subgraph. Taxas de query são distribuídas diretamente a Indexadores conforme a função de rebate exponencial (veja o GIP [aqui](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)).

-2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately.
+2. **Recompensas de indexação**: a emissão anual de 3% é distribuída aos Indexadores com base no número de subgraphs que indexam. Estas recompensas os incentivam a indexar subgraphs, às vezes antes das taxas de query começarem, de modo a acumular e enviar Provas de Indexação (POIs) que verificam que indexaram dados corretamente.

-Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph.
+Cada subgraph recebe uma porção da emissão total do token na rede, com base na quantia do sinal de curadoria do subgraph. Essa quantia é então recompensada aos Indexadores com base no seu stake alocado no subgraph.

-In order to run an indexing node, Indexers must self-stake 100,000 GRT or more with the network.
Indexers are incentivized to self-stake GRT in proportion to the amount of queries they serve.
+Para executar um node de indexação, os Indexadores devem fazer um stake de 100.000 GRT ou mais com a rede. Os mesmos são incentivados a fazer um stake de GRT, proporcional à quantidade de queries que servem.

-Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial self-stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial self-stake), they will not be able to use the additional GRT from Delegators until they increase their self-stake in the network.
+Os Indexadores podem aumentar suas alocações de GRT nos subgraphs ao aceitar delegações de GRT de Delegantes; também podem aceitar até 16 vezes a quantia do seu stake inicial. Se um Indexador se tornar "excessivamente delegado" (por ex., com seu stake inicial multiplicado mais de 16 vezes), ele não poderá usar o GRT adicional dos Delegantes até aumentar o seu próprio stake na rede.

-The amount of rewards an Indexer receives can vary based on the Indexer's self-stake, accepted delegation, quality of service, and many more factors.
+A quantidade de recompensas recebidas por um Indexador pode variar com base no seu auto-stake, delegação aceita, qualidade de serviço, e muito mais fatores.

-## Token Supply: Burning & Issuance
+## Reserva de Tokens: Queima e Emissão

-The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network.
+A reserva inicial de tokens é de 10 bilhões de GRT, com um alvo de emissão de 3% novos ao ano para recompensar os Indexadores por alocar stake em subgraphs. Portanto, a reserva total de tokens GRT aumentará por 3% a cada ano à medida que tokens novos são emitidos para Indexadores, pela sua contribuição à rede.

-The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data.
+O The Graph é projetado com vários mecanismos de queima para compensar pela emissão de novos tokens. Aproximadamente 1% da reserva de GRT é queimado todo ano, através de várias atividades na rede, e este número só aumenta conforme a atividade na rede cresce. Estas atividades de queima incluem: uma taxa de delegação de 0,5% sempre que um Delegante delega GRT a um Indexador; uma taxa de curadoria de 1% quando Curadores sinalizam em um subgraph; e 1% de taxas de query por dados de blockchain.

-![Total burned GRT](/img/total-burned-grt.jpeg)
+![Total de GRT Queimado](/img/total-burned-grt.jpeg)

-In addition to these regularly occurring burning activities, the GRT token also has a slashing mechanism in place to penalize malicious or irresponsible behavior by Indexers. If an Indexer is slashed, 50% of their indexing rewards for the epoch are burned (while the other half goes to the fisherman), and their self-stake is slashed by 2.5%, with half of this amount being burned.
This helps to ensure that Indexers have a strong incentive to act in the best interests of the network and to contribute to its security and stability.
+Além destas atividades recorrentes de queima, o token GRT também tem um mecanismo de corte para penalizar comportamentos maliciosos ou irresponsáveis pelos Indexadores. Se um Indexador for cortado, é queimado 50% das suas recompensas de indexação no epoch (e a outra metade vai ao pescador); e o seu auto-stake é cortado em 2.5%, com metade desta quantidade queimada. Isto ajuda a garantir que os Indexadores tenham um forte incentivo a agir nos melhores interesses da rede e contribuir à sua segurança e estabilidade.

-## Improving the Protocol
+## Como Aprimorar o Protocolo

-The Graph Network is ever-evolving and improvements to the economic design of the protocol are constantly being made to provide the best experience for all network participants. The Graph Council oversees protocol changes and community members are encouraged to participate. Get involved with protocol improvements in [The Graph Forum](https://forum.thegraph.com/).
+A Graph Network está sempre a evoluir, e o projeto económico do protocolo está sempre a melhorar, de modo a entregar a melhor experiência para todos os participantes da rede. O Graph Council (Conselho do Graph) supervisa as mudanças no protocolo e os membros da comunidade estão convidados para participar. Ajude a melhorar o protocolo no [Fórum do The Graph](https://forum.thegraph.com/).

From e7b62ad33473fe5f34cd9f4b51a6644d6899b16f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:53:20 -0500
Subject: [PATCH 0512/1534] New translations tokenomics.mdx (Russian)

---
 website/src/pages/ru/resources/tokenomics.mdx | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/website/src/pages/ru/resources/tokenomics.mdx b/website/src/pages/ru/resources/tokenomics.mdx
index 8340b03ef09d..e4ab88d45844 100644
--- a/website/src/pages/ru/resources/tokenomics.mdx
+++ b/website/src/pages/ru/resources/tokenomics.mdx
@@ -1,5 +1,6 @@
 ---
 title: Токеномика сети The Graph
+sidebarTitle: Tokenomics
 description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works.
 ---

@@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo

- GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7)

-- Адрес токена GRT в сети Arbitrum One [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7)
+- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7)

## The Roles of Network Participants

There are four primary network participants:

-1. Делегаторы - Делегируют токены GRT Индексерам & защищают сеть
+1. Delegators - Delegate GRT to Indexers & secure the network

-2. Кураторы - Ищут лучшие субграфы для Индексаторов
+2. Кураторы - Ищут лучшие субграфы для Индексаторов

-3. Разработчики - Создают & запрашивают субграфы
+3. Developers - Build & query subgraphs

-4. Индексаторы - Магистральный канал передачи данных блокчейна
+4. Индексаторы - Магистральный канал передачи данных блокчейна

Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles.
For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 8c9749bad35eec371af1ea674cdd977022fcf7c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:21 -0500 Subject: [PATCH 0513/1534] New translations tokenomics.mdx (Swedish) --- website/src/pages/sv/resources/tokenomics.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/sv/resources/tokenomics.mdx b/website/src/pages/sv/resources/tokenomics.mdx index 113baff87745..3d6c4666a960 100644 --- a/website/src/pages/sv/resources/tokenomics.mdx +++ b/website/src/pages/sv/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics för The Graf Nätverk +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- GRT Tokenadress på Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. Delegater - Delegera GRT till Indexers & säkra nätverket +1. Delegators - Delegate GRT to Indexers & secure the network -2. Kuratorer - Hitta de bästa subgrafterna för Indexers +2. Kuratorer - Hitta de bästa subgrafterna för Indexers -3. Utvecklare - Bygg & fråga subgrafter +3. Developers - Build & query subgraphs -4. Indexers - Grundvalen för blockkedjedata +4. 
Indexers - Grundvalen för blockkedjedata Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 3dcaf81c372f774dabacf97ce90ea7f667af020e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:22 -0500 Subject: [PATCH 0514/1534] New translations tokenomics.mdx (Turkish) --- website/src/pages/tr/resources/tokenomics.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/tr/resources/tokenomics.mdx b/website/src/pages/tr/resources/tokenomics.mdx index a0ecd1efd609..ff09d144619c 100644 --- a/website/src/pages/tr/resources/tokenomics.mdx +++ b/website/src/pages/tr/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics of The Graph Network +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -21,13 +22,13 @@ The Graph plays a vital role in making blockchain data more accessible and suppo There are four primary network participants: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query subgraphs -4. Indexers - Backbone of blockchain data +4. Indexers - Backbone of blockchain data Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). 
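> Editor's aside: the hunk that follows (and its counterparts in the sibling translation commits) says query fees are distributed to Indexers "according to the exponential rebate function" of GIP-0051. The toy function below only illustrates the general saturating shape such a curve has, where more stake captures more of the fees with diminishing returns; it is not the protocol's actual formula or parameters, which are specified in the linked GIP.

```typescript
// Toy saturating curve in the spirit of an exponential rebate:
// rebates grow with stake but flatten out. NOT the real GIP-0051
// formula or parameters; see the GIP linked in the docs for those.

function toyRebate(fees: number, stake: number, k = 1): number {
  if (fees === 0) return 0;
  return fees * (1 - Math.exp((-k * stake) / fees));
}

// With 1,000 GRT of fees, more stake claims a larger share,
// with diminishing returns:
for (const stake of [100, 500, 1_000, 5_000]) {
  console.log(stake, toyRebate(1_000, stake).toFixed(1)); // 95.2, 393.5, 632.1, 993.3
}
```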
@@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 12fde32bbaa6d4e65ca04959f437848300c8a579 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:23 -0500 Subject: [PATCH 0515/1534] New translations tokenomics.mdx (Ukrainian) --- website/src/pages/uk/resources/tokenomics.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/uk/resources/tokenomics.mdx b/website/src/pages/uk/resources/tokenomics.mdx index 1abf2da2618f..709ebb3b40c0 100644 --- a/website/src/pages/uk/resources/tokenomics.mdx +++ b/website/src/pages/uk/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Токеноміка мережі The Graph +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- Адреса GRT токена в мережі Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. Делегати - делегують токен GRT індексаторам & забезпечують захист мережі +1. Delegators - Delegate GRT to Indexers & secure the network -2. Куратори - знаходять найкращі підграфи для індексаторів +2. Куратори - знаходять найкращі підграфи для індексаторів -3. Розробники - розробляють & роблять запити до підграфів +3. Developers - Build & query subgraphs -4. Індексатори - кістяк блокчейн-даних +4. 
Індексатори - кістяк блокчейн-даних Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 8b7c79958c5a669a66d11372e04400079f88c327 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:24 -0500 Subject: [PATCH 0516/1534] New translations tokenomics.mdx (Chinese Simplified) --- website/src/pages/zh/resources/tokenomics.mdx | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/website/src/pages/zh/resources/tokenomics.mdx b/website/src/pages/zh/resources/tokenomics.mdx index cd2c84562e70..c9062327aa5d 100644 --- a/website/src/pages/zh/resources/tokenomics.mdx +++ b/website/src/pages/zh/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Graph网络的代币经济学 +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- Arbitrum One上的GRT代币地址: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. 授权人-将GRT授权给索引人并确保网络安全 +1. Delegators - Delegate GRT to Indexers & secure the network -2. 策展人-为索引人找到最佳子图 +2. 策展人-为索引人找到最佳子图 -3. 开发人员-构建和查询子图 +3. Developers - Build & query subgraphs -4. 索引人-区块链数据的主干 +4. 
索引人-区块链数据的主干 Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -51,7 +52,7 @@ Subgraph developers are encouraged to curate their subgraph with at least 3,000 Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. -## Developers +## 开发人员 Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 03fa6094636763be88fd09a13fe4f672c386097d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:25 -0500 Subject: [PATCH 0517/1534] New translations tokenomics.mdx (Urdu (Pakistan)) --- website/src/pages/ur/resources/tokenomics.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ur/resources/tokenomics.mdx b/website/src/pages/ur/resources/tokenomics.mdx index bfcb005f6400..269dfc583951 100644 --- a/website/src/pages/ur/resources/tokenomics.mdx +++ b/website/src/pages/ur/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: گراف نیٹ ورک کے ٹوکنومکس +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. 
--- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- آربٹرم ون پر GRT ٹوکن ایڈریس: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. ڈیلیگیٹرز - انڈیکسرز کو GRT ڈیلیگیٹ کریں اور نیٹ ورک کو محفوظ کریں +1. Delegators - Delegate GRT to Indexers & secure the network -2. کیوریٹرز - انڈیکسرز کے لیے بہترین سب گراف تلاش کریں +2. کیوریٹرز - انڈیکسرز کے لیے بہترین سب گراف تلاش کریں -3. ڈویلپرز - سب گراف تعمیر اور کیوری کرنا +3. Developers - Build & query subgraphs -4. انڈیکسرز - بلاکچین ڈیٹا کی ریڑھ کی ہڈی +4. انڈیکسرز - بلاکچین ڈیٹا کی ریڑھ کی ہڈی Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. 
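> Editor's aside: every tokenomics commit in this batch restates the same supply figures: a 10 billion GRT initial supply, a 3% annual issuance target, and roughly 1% of supply burned per year. Compounding the two quoted rates gives a rough net trajectory. This is an illustration of the stated numbers only, not an official supply schedule, since real issuance and burn track network activity.

```typescript
// Naive projection that compounds the quoted 3% issuance and ~1% burn.
// Illustrative only; actual rates vary with network activity.

const initialSupply = 10_000_000_000; // 10 billion GRT
const issuanceRate = 0.03;            // 3% new issuance per year
const burnRate = 0.01;                // ~1% of supply burned per year

let supply = initialSupply;
for (let year = 1; year <= 5; year++) {
  supply = supply * (1 + issuanceRate - burnRate); // ≈ +2% net per year
  console.log(`year ${year}: ${(supply / 1e9).toFixed(2)}B GRT`);
}
```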
From 2bfbcfc81b9de5fd41b867c19a503fec975ca22c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:26 -0500 Subject: [PATCH 0518/1534] New translations tokenomics.mdx (Vietnamese) --- website/src/pages/vi/resources/tokenomics.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/vi/resources/tokenomics.mdx b/website/src/pages/vi/resources/tokenomics.mdx index af3aad3befc0..4b1d2516879a 100644 --- a/website/src/pages/vi/resources/tokenomics.mdx +++ b/website/src/pages/vi/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: Tokenomics of The Graph Network +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -21,13 +22,13 @@ The Graph plays a vital role in making blockchain data more accessible and suppo There are four primary network participants: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. Curators - Find the best subgraphs for Indexers -3. Developers - Build & query subgraphs +3. Developers - Build & query subgraphs -4. Indexers - Backbone of blockchain data +4. Indexers - Backbone of blockchain data Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. 
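> Editor's aside: the staking rules repeated in these files (a 100,000 GRT minimum self-stake and a delegation capacity of 16 times the self-stake, beyond which delegated GRT cannot be put to work) reduce to one small helper. A minimal sketch under those stated rules; the function name and example values are illustrative.

```typescript
// Effective delegation capacity per the rules quoted in these docs:
// delegation above 16x the Indexer's self-stake earns nothing extra.

const MIN_SELF_STAKE = 100_000; // GRT required to run an indexing node
const DELEGATION_CAP_RATIO = 16;

function usableDelegation(selfStake: number, delegated: number): number {
  if (selfStake < MIN_SELF_STAKE) {
    throw new Error("self-stake below the 100,000 GRT minimum");
  }
  const cap = selfStake * DELEGATION_CAP_RATIO;
  return Math.min(delegated, cap); // over-delegated GRT is not allocatable
}

// An Indexer self-staking the minimum can put at most 1.6M delegated GRT to work:
console.log(usableDelegation(100_000, 2_000_000)); // 1600000
```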
From 14d88b5b9546ab67d4e954628014c497a609bbde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:28 -0500 Subject: [PATCH 0519/1534] New translations tokenomics.mdx (Marathi) --- website/src/pages/mr/resources/tokenomics.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/mr/resources/tokenomics.mdx b/website/src/pages/mr/resources/tokenomics.mdx index 4c3add397870..0fe45e9d9969 100644 --- a/website/src/pages/mr/resources/tokenomics.mdx +++ b/website/src/pages/mr/resources/tokenomics.mdx @@ -1,5 +1,6 @@ --- title: ग्राफ नेटवर्कचे टोकनॉमिक्स +sidebarTitle: Tokenomics description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token, works. --- @@ -15,19 +16,19 @@ The Graph plays a vital role in making blockchain data more accessible and suppo - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- आर्बिट्रम वन वर GRT टोकन पत्ता: [0x9623063377AD1B27544C965cCd738](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) ## The Roles of Network Participants There are four primary network participants: -1. प्रतिनिधी - GRT इंडेक्सर्सना सोपवा & नेटवर्क सुरक्षित करा +1. Delegators - Delegate GRT to Indexers & secure the network -2. क्युरेटर - इंडेक्सर्ससाठी सर्वोत्तम सबग्राफ शोधा +2. क्युरेटर - इंडेक्सर्ससाठी सर्वोत्तम सबग्राफ शोधा -3. विकासक - बिल्ड & क्वेरी सबग्राफ +3. Developers - Build & query subgraphs -4. इंडेक्सर्स - ब्लॉकचेन डेटाचा कणा +4. इंडेक्सर्स - ब्लॉकचेन डेटाचा कणा Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). @@ -75,9 +76,9 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). +1. **Query fees**: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. +2. **Indexing rewards**: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs), verifying that they have indexed data accurately. 
Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. From 6f88066366016a3bb2e4534196c7b5499bd7bdc2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:29 -0500 Subject: [PATCH 0520/1534] New translations tokenomics.mdx (Hindi) --- website/src/pages/hi/resources/tokenomics.mdx | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/website/src/pages/hi/resources/tokenomics.mdx b/website/src/pages/hi/resources/tokenomics.mdx index 3b059d2c1439..e3437e3a0fff 100644 --- a/website/src/pages/hi/resources/tokenomics.mdx +++ b/website/src/pages/hi/resources/tokenomics.mdx @@ -1,17 +1,18 @@ --- title: ग्राफ नेटवर्क के टोकनोमिक्स +sidebarTitle: Tokenomics description: The Graph Network को शक्तिशाली टोकनोमिक्स द्वारा प्रोत्साहित किया जाता है। यहां बताया गया है कि GRT, The Graph का मूल कार्य उपयोगिता टोकन, कैसे काम करता है। --- ## अवलोकन -The Graph is a decentralized protocol that enables easy access to blockchain data. It indexes blockchain data similarly to how Google indexes the web. If you've used a dapp that retrieves data from a subgraph, you've probably interacted with The Graph. Today, thousands of [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem use The Graph. +The Graph एक विकेन्द्रीकृत प्रोटोकॉल है जो ब्लॉकचेन डेटा तक आसान पहुंच सक्षम करता है। यह ब्लॉकचेन डेटा को उसी तरह से अनुक्रमित करता है जैसे Google वेब को अनुक्रमित करता है। यदि आपने किसी dapp का उपयोग किया है जो किसी Subgraph से डेटा पुनर्प्राप्त करता है, तो संभवतः आपने The Graph के साथ इंटरैक्ट किया है। आज, वेब3 इकोसिस्टम में हजारों [popular dapps](https://thegraph.com/explorer) The Graph का उपयोग कर रहे हैं। ## विशिष्टताएँ -The Graph's model is akin to a B2B2C model, but it's driven by a decentralized network where participants collaborate to provide data to end users in exchange for GRT rewards. GRT is the utility token for The Graph. It coordinates and incentivizes the interaction between data providers and consumers within the network. +The Graph का मॉडल एक B2B2C मॉडल के समान है, लेकिन यह एक विकेंद्रीकृत नेटवर्क द्वारा संचालित होता है जहां प्रतिभागी एंड यूजर्स को डेटा प्रदान करने के लिए सहयोग करते हैं और इसके बदले में उन्हें GRT इनाम के रूप में मिलता है। GRT The Graph का यूटिलिटी टोकन है। यह नेटवर्क के भीतर डेटा प्रोवाइडर्स और कंज्यूमर्स के बीच इंटरैक्शन को समन्वित और प्रोत्साहित करता है। -The Graph plays a vital role in making blockchain data more accessible and supports a marketplace for its exchange. To learn more about The Graph's pay-for-what-you-need model, check out its [free and growth plans](/subgraphs/billing/). +The Graph ब्लॉकचेन डेटा को अधिक सुलभ बनाने में महत्वपूर्ण भूमिका निभाता है और इसके आदान-प्रदान के लिए एक मार्केटप्लेस का समर्थन करता है। The Graph के पे-फॉर-व्हाट-यू-नीड मॉडल के बारे में अधिक जानने के लिए, इसके [free and growth plans](/subgraphs/billing/) देखें। - GRT Token Address on Mainnet: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) @@ -21,13 +22,13 @@ The Graph plays a vital role in making blockchain data more accessible and suppo There are four primary network participants: -1. डेलिगेटर्स - इंडेक्सर्स और amp के लिए GRT डेलिगेट करें; नेटवर्क को सुरक्षित करें +1. Delegators - Delegate GRT to Indexers & secure the network -2. Curators - Find the best subgraphs for Indexers +2. 
Curators - Find the best subgraphs for Indexers

-3. डेवलपर - निर्माण और amp; क्वेरी सबग्राफ
+3. Developers - Build & query subgraphs

-4. इंडेक्सर्स - ब्लॉकचेन डेटा की रीढ़
+4. इंडेक्सर्स - ब्लॉकचेन डेटा की रीढ़

Fishermen and Arbitrators are also integral to the network's success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/).

@@ -65,19 +66,19 @@ Once developers have built and tested their subgraph, they can [publish their su

Once a subgraph is [published](/subgraphs/developing/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph.

-Subgraphs are [queried using GraphQL](/subgraphs/querying/introduction/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol.
+सबग्राफ़ को [GraphQL का उपयोग करके क्वेरी किया जाता है](/subgraphs/querying/introduction/), और क्वेरी शुल्क को [Subgraph Studio](https://thegraph.com/studio/) में GRT के साथ भुगतान किया जाता है। क्वेरी शुल्क को नेटवर्क प्रतिभागियों में उनके प्रोटोकॉल में योगदान के आधार पर वितरित किया जाता है।

1% of the query fees paid to the network are burned.

## Indexers (Earn GRT)

-Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs.
+Indexers The Graph की रीढ़ हैं। वे स्वतंत्र हार्डवेयर और सॉफ़्टवेयर संचालित करते हैं जो The Graph के विकेन्द्रीकृत नेटवर्क को शक्ति प्रदान करता है। Indexers, सबग्राफ से निर्देशों के आधार पर उपभोक्ताओं को डेटा प्रदान करते हैं।

-Indexers can earn GRT rewards in two ways:
+Indexers दो तरीकों से GRT रिवार्ड्स कमा सकते हैं:

-1. **क्वेरी शुल्क**: डेवलपर्स या उपयोगकर्ताओं द्वारा subgraph डेटा क्वेरी के लिए भुगतान किए गए GRT। क्वेरी शुल्क सीधे गुणात्मक रिबेट फंक्शन के अनुसार Indexers को वितरित किए जाते हैं (GIP [यहां](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162) देखें)।
+1. **क्वेरी शुल्क:** डेवलपर्स या उपयोगकर्ताओं द्वारा Subgraph डेटा क्वेरी के लिए भुगतान किया गया GRT। क्वेरी शुल्क सीधे Indexers को एक्सपोनेंशियल रिबेट फ़ंक्शन के अनुसार वितरित किया जाता है (देखें GIP [यहाँ](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162))।

-2. **इंडेक्सिंग रिवार्ड्स**: 3% वार्षिक इश्यूअंस उन Indexers को वितरित किया जाता है, जो वे कितने subgraphs को इंडेक्स कर रहे हैं, उस पर आधारित होता है। ये रिवार्ड्स Indexers को subgraphs को इंडेक्स करने के लिए प्रेरित करते हैं, कभी-कभी query fees शुरू होने से पहले, ताकि वे Proofs of Indexing (POIs) एकत्रित और सबमिट कर सकें, यह सत्यापित करने के लिए कि उन्होंने डेटा को सही ढंग से इंडेक्स किया है।
+2. **Indexing रिवार्ड्स**: 3% की वार्षिक जारी राशि Indexers को उनके द्वारा indexed किए गए सबग्राफकी संख्या के आधार पर वितरित की जाती है। ये पुरस्कार Indexers को सबग्राफको index करने के लिए प्रेरित करते हैं, कभी-कभी query fees शुरू होने से पहले भी, ताकि वे Proofs of Indexing (POIs) को एकत्रित और प्रस्तुत कर सकें, यह सत्यापित करने के लिए कि उन्होंने डेटा को सटीक रूप से index किया है।

Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph.
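> Editor's aside: the indexing-reward split restated at the end of this commit is two pro-rata shares: issuance is divided across subgraphs by curation signal, then each subgraph's portion is divided across Indexers by allocated stake. A back-of-the-envelope sketch in which every input except the 3% issuance rate and the 10 billion GRT supply is a made-up example value:

```typescript
// Two pro-rata splits, per the description above. All inputs except
// the 3% issuance rate are hypothetical example values.

const annualIssuance = 10_000_000_000 * 0.03; // 3% of a 10B GRT supply

const subgraphSignal = 30_000;        // curation signal on one subgraph (example)
const totalNetworkSignal = 6_000_000; // signal across all subgraphs (example)

const indexerAllocation = 500_000;         // this Indexer's stake on the subgraph (example)
const totalSubgraphAllocation = 2_000_000; // all Indexers' stake on it (example)

const subgraphRewards = annualIssuance * (subgraphSignal / totalNetworkSignal);
const indexerRewards = subgraphRewards * (indexerAllocation / totalSubgraphAllocation);

console.log(subgraphRewards, indexerRewards); // 1,500,000 and 375,000 GRT/year
```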
From c690989326d1e59f53389d9af81b4090adaa9c91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:30 -0500 Subject: [PATCH 0521/1534] New translations billing.mdx (Romanian) --- website/src/pages/ro/subgraphs/billing.mdx | 59 +++++++++++----------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/website/src/pages/ro/subgraphs/billing.mdx b/website/src/pages/ro/subgraphs/billing.mdx index a88a51c3adff..c9f380bb022c 100644 --- a/website/src/pages/ro/subgraphs/billing.mdx +++ b/website/src/pages/ro/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Billing --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. 
+ - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). 
From ebe4002395fe301654b3e77bc9dba6731588ca75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:31 -0500 Subject: [PATCH 0522/1534] New translations billing.mdx (French) --- website/src/pages/fr/subgraphs/billing.mdx | 113 +++++++++++---------- 1 file changed, 57 insertions(+), 56 deletions(-) diff --git a/website/src/pages/fr/subgraphs/billing.mdx b/website/src/pages/fr/subgraphs/billing.mdx index 4202d9c194ce..ba4239f2ea01 100644 --- a/website/src/pages/fr/subgraphs/billing.mdx +++ b/website/src/pages/fr/subgraphs/billing.mdx @@ -2,20 +2,20 @@ title: Facturation --- -## Les Plans de Facturation des Subgraphs +## Querying Plans Il y a deux plans à utiliser lorsqu'on interroge les subgraphs sur le réseau de The Graph. -- **Le Plan Gratuit**: Le Plan Gratuit comprend 100,000 requêtes mensuelles gratuites avec un accès complet à l'environnement de test de Subgraph Studio. Ce plan est conçu pour les amateurs, les participants aux hackathons et ceux qui ont des projets annexes pour essayer The Graph avant de faire évoluer leur dApp. +- **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. -- **Plan de croissance**: Le plan de croissance comprend tout ce qui est inclus dans le plan gratuit avec toutes les requêtes après 100 000 requêtes mensuelles nécessitant des paiements en GRT ou par carte de crédit. Le plan de croissance est suffisamment flexible pour couvrir les besoins des équipes qui ont établi des dapps à dans une variété de cas d'utilisation. +- **Growth Plan**: The Growth Plan includes everything in the Free Plan with all queries after 100,000 monthly queries requiring payments with GRT or credit card. The Growth Plan is flexible enough to cover teams that have established dapps across a variety of use cases. ## Paiements de Requêtes avec Carte de Crédit⁠ - Pour mettre en place la facturation par carte de crédit/débit, les utilisateurs doivent accéder à Subgraph Studio (https://thegraph.com/studio/) - 1. Accédez à la [page de facturation de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Cliquez sur le bouton "Connecter le portefeuille" dans le coin supérieur droit de la page. Vous serez redirigé vers la page de sélection des portefeuilles. Sélectionnez votre portefeuille et cliquez sur "Connecter". 3. Choisissez « Mettre à niveau votre abonnement » si vous effectuez une mise à niveau depuis le plan gratuit, ou choisissez « Gérer l'abonnement » si vous avez déjà ajouté des GRT à votre solde de facturation par le passé. Ensuite, vous pouvez estimer le nombre de requêtes pour obtenir une estimation du prix, mais ce n'est pas une étape obligatoire. 4. Pour choisir un paiement par carte de crédit, choisissez “Credit card” comme mode de paiement et remplissez les informations de votre carte de crédit. Ceux qui ont déjà utilisé Stripe peuvent utiliser la fonctionnalité Link pour remplir automatiquement leurs informations. @@ -37,24 +37,25 @@ L'utilisation du GRT sur Arbitrum est nécessaire pour le paiement des requêtes - Si vous avez déjà des GRT sur Ethereum, vous pouvez les transférer vers Arbitrum. 
Vous pouvez le faire via l'option de transfert de GRT fournie dans Subgraph Studio ou en utilisant l'un des ponts suivants : -- [Le pont Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) +- [Le Bridge Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - Si vous possédez déjà des actifs sur Arbitrum, vous pouvez les échanger contre du GRT via un protocole d'échange comme Uniswap. - Alternativement, vous pouvez acquérir du GRT directement sur Arbitrum via un échange décentralisé. -> Cette section est rédigée en supposant que vous avez déjà des GRT dans votre portefeuille et que vous êtes sur Arbitrum. Si vous n'avez pas de GRT, vous pouvez apprendre comment obtenir des GRT [ici](#getting-grt). +> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). Une fois que vous avez transféré du GRT, vous pouvez l'ajouter à votre solde de facturation. ### Ajout de GRT à l'aide d'un portefeuille -1. Accédez à la [page de facturation de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Cliquez sur le bouton "Connecter le portefeuille" dans le coin supérieur droit de la page. Vous serez redirigé vers la page de sélection des portefeuilles. Sélectionnez votre portefeuille et cliquez sur "Connecter". 3. Cliquez sur le bouton « Manage » situé dans le coin supérieur droit. Les nouveaux utilisateurs verront l'option « Upgrade to Growth plan » (Passer au plan de croissance), tandis que les utilisateurs existants devront sélectionner « Deposit from wallet » (Déposer depuis le portefeuille). 4. Utilisez le curseur pour estimer le nombre de requêtes que vous prévoyez d’effectuer sur une base mensuelle. - - Pour des suggestions sur le nombre de requêtes que vous pouvez utiliser, consultez notre page **Foire aux questions**. + - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. 5. Choisissez "Cryptocurrency". Le GRT est actuellement la seule cryptomonnaie acceptée sur le réseau The Graph. 6. Sélectionnez le nombre de mois pour lesquels vous souhaitez effectuer un paiement anticipé. - Le paiement anticipé ne vous engage pas sur une utilisation future. Vous ne serez facturé que pour ce que vous utiliserez et vous pourrez retirer votre solde à tout moment. @@ -67,7 +68,7 @@ Une fois que vous avez transféré du GRT, vous pouvez l'ajouter à votre solde ### Retirer des GRT en utilisant un portefeuille -1. Accédez à la [page de facturation de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Cliquez sur le bouton "Connect Wallet" dans le coin supérieur droit de la page. Sélectionnez votre portefeuille et cliquez sur "Connect". 3. Cliquez sur le bouton « Gérer » dans le coin supérieur droit de la page. Sélectionnez « Retirer des GRT ». Un panneau latéral apparaîtra. 4. Entrez le montant de GRT que vous voudriez retirer. @@ -76,11 +77,11 @@ Une fois que vous avez transféré du GRT, vous pouvez l'ajouter à votre solde ### Ajout de GRT à l'aide d'un portefeuille multisig -1. Allez à la page [Facturation de Studio Subgraph](https://thegraph.com/studio/subgraphs/billing/). -2. Cliquez sur le bouton "Connect Wallet" dans le coin supérieur droit de la page. 
Sélectionnez votre portefeuille et cliquez sur "Connect". Si vous utilisez [Gnosis-Safe](https://gnosis-safe.io/), vous pourrez connecter votre multisig ainsi que votre portefeuille de signature. Ensuite, signez le message associé. Cela ne coûtera aucun gaz. +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). +2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. 3. Cliquez sur le bouton « Manage » situé dans le coin supérieur droit. Les nouveaux utilisateurs verront l'option « Upgrade to Growth plan » (Passer au plan de croissance), tandis que les utilisateurs existants devront sélectionner « Deposit from wallet » (Déposer depuis le portefeuille). 4. Utilisez le curseur pour estimer le nombre de requêtes que vous prévoyez d’effectuer sur une base mensuelle. - - Pour des suggestions sur le nombre de requêtes que vous pouvez utiliser, consultez notre page **Foire aux questions**. + - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. 5. Choisissez "Cryptocurrency". Le GRT est actuellement la seule cryptomonnaie acceptée sur le réseau The Graph. 6. Sélectionnez le nombre de mois pour lesquels vous souhaitez effectuer un paiement anticipé. - Le paiement anticipé ne vous engage pas sur une utilisation future. Vous ne serez facturé que pour ce que vous utiliserez et vous pourrez retirer votre solde à tout moment. @@ -98,7 +99,7 @@ Cette section vous montrera comment obtenir du GRT pour payer les frais de requ Voici un guide étape par étape pour acheter de GRT sur Coinbase. -1. Accédez à [Coinbase](https://www.coinbase.com/) et créez un compte. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. 2. Dès que vous aurez créé un compte, vous devrez vérifier votre identité par le biais d'un processus connu sous le nom de KYC (Know Your Customer ou Connaître Votre Client). Il s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires. 3. Une fois votre identité vérifiée, vous pouvez acheter des GRT. Pour ce faire, cliquez sur le bouton « Acheter/Vendre » en haut à droite de la page. 4. Sélectionnez la devise que vous souhaitez acheter. Sélectionnez GRT. @@ -106,19 +107,19 @@ Voici un guide étape par étape pour acheter de GRT sur Coinbase. 6. Sélectionnez la quantité de GRT que vous souhaitez acheter. 7. Vérifiez votre achat. Vérifiez votre achat et cliquez sur "Buy GRT". 8. Confirmez votre achat. Confirmez votre achat et vous aurez acheté des GRT avec succès. -9. Vous pouvez transférer les GRT de votre compte vers votre portefeuille tel que [MetaMask](https://metamask.io/). +9. You can transfer the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). - Pour transférer les GRT dans votre portefeuille, cliquez sur le bouton "Accounts" en haut à droite de la page. - Cliquez sur le bouton "Send" à côté du compte GRT. - Entrez le montant de GRT que vous souhaitez envoyer et l'adresse du portefeuille vers laquelle vous souhaitez l'envoyer. - Cliquez sur "Continue" et confirmez votre transaction. 
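Once a transfer like the one above completes, a quick balance check confirms the GRT actually arrived in the self-custodial wallet. The sketch below is illustrative only: it assumes ethers v6, reuses the GRT contract address on Arbitrum One cited in the Uniswap steps of this same file, and `0xYourWalletAddress` is a placeholder to replace.

```typescript
import { Contract, JsonRpcProvider, formatUnits } from "ethers";

// GRT token contract on Arbitrum One, as cited elsewhere in this guide.
const GRT_ADDRESS = "0x9623063377AD1B27544C965cCd7342f7EA7e88C7";
// Minimal ERC-20 ABI fragment -- balanceOf is all this check needs.
const ERC20_ABI = ["function balanceOf(address owner) view returns (uint256)"];

// Assumed public Arbitrum One RPC endpoint -- swap in your own provider if you have one.
const provider = new JsonRpcProvider("https://arb1.arbitrum.io/rpc");

async function checkGrtBalance(wallet: string): Promise<void> {
  const grt = new Contract(GRT_ADDRESS, ERC20_ABI, provider);
  const raw: bigint = await grt.balanceOf(wallet);
  // GRT uses 18 decimals, like most ERC-20 tokens.
  console.log(`GRT balance: ${formatUnits(raw, 18)} GRT`);
}

// Hypothetical placeholder -- substitute the wallet address you transferred to.
checkGrtBalance("0xYourWalletAddress").catch(console.error);
```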
-Veuillez noter que pour des montants d'achat plus importants, Coinbase peut vous demander d'attendre 7 à 10 jours avant de transférer le montant total vers un portefeuille. -Vous pouvez en savoir plus sur l'obtention de GRT sur Coinbase [ici](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Ceci est un guide étape par étape pour l'achat des GRT sur Binance. -1. Allez sur [Binance](https://www.binance.com/en) et créez un compte. +1. Go to [Binance](https://www.binance.com/en) and create an account. 2. Dès que vous aurez créé un compte, vous devrez vérifier votre identité par le biais d'un processus connu sous le nom de KYC (Know Your Customer ou Connaître Votre Client). Il s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires. 3. Une fois votre identité vérifiée, vous pouvez acheter des GRT. Pour ce faire, cliquez sur le bouton « Acheter maintenant » sur la bannière de la page d'accueil. 4. Vous accéderez à une page où vous pourrez sélectionner la devise que vous souhaitez acheter. Sélectionnez GRT. @@ -126,27 +127,27 @@ Ceci est un guide étape par étape pour l'achat des GRT sur Binance. 6. Sélectionnez la quantité de GRT que vous souhaitez acheter. 7. Confirmez votre achat et cliquez sur « Acheter des GRT ». 8. Confirmez votre achat et vous pourrez voir vos GRT dans votre portefeuille Binance Spot. -9. Vous pouvez retirer les GRT de votre compte vers votre portefeuille tel que [MetaMask](https://metamask.io/). - - [Pour retirer](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) des GRT dans votre portefeuille, ajoutez l'adresse de votre portefeuille à la liste blanche des retraits. +9. You can withdraw the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). + - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist. - Cliquez sur le bouton « portefeuille », cliquez sur retrait et sélectionnez GRT. - Saisissez le montant de GRT que vous souhaitez envoyer et l'adresse du portefeuille sur liste blanche à laquelle vous souhaitez l'envoyer. - Cliquer sur « Continuer » et confirmez votre transaction. -Vous pouvez en savoir plus sur l'obtention de GRT sur Binance [ici](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap Voici comment vous pouvez acheter des GRT sur Uniswap. -1. Accédez à [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) et connectez votre portefeuille. +1. Go to [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) and connect your wallet. 2. Sélectionnez le jeton dont vous souhaitez échanger. Sélectionnez ETH. 3. Sélectionnez le jeton vers lequel vous souhaitez échanger. Sélectionnez GRT. - - Assurez-vous que vous échangez contre le bon jeton. 
L'adresse du contrat intelligent GRT sur Arbitrum One est la suivante : [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7)
+   - Make sure you're swapping for the correct token. The GRT smart contract address on Arbitrum One is: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7)
4. Entrez le montant d'ETH que vous souhaitez échanger.
5. Cliquez sur « Échanger ».
6. Confirmez la transaction dans votre portefeuille et attendez qu'elle soit traitée.

-Vous pouvez en savoir plus sur l'obtention de GRT sur Uniswap [ici](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-).
+You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-).

## Obtenir de l'Ether

@@ -156,43 +157,43 @@ Cette section vous montrera comment obtenir de l'Ether (ETH) pour payer les frai

### Coinbase

Ce sera un guide étape par étape pour acheter de l'ETH sur Coinbase.

-1. Accédez à [Coinbase](https://www.coinbase.com/) et créez un compte.
-2. Une fois que vous avez créé un compte, vérifiez votre identité via un processus appelé KYC (ou Know Your Customer). l s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires.
-3. Une fois que vous avez vérifié votre identité, achetez de l'ETH en cliquant sur le bouton « Acheter/Vendre » en haut à droite de la page.
-4. Choisissez la devise que vous souhaitez acheter. Sélectionnez ETH.
-5. Sélectionnez votre mode de paiement préféré.
-6. Entrez le montant d'ETH que vous souhaitez acheter.
-7. Vérifiez votre achat et cliquez sur « Acheter des Ethereum ».
-8. Confirmez votre achat et vous aurez acheté avec succès de l'ETH.
-9. Vous pouvez transférer l'ETH de votre compte Coinbase vers votre portefeuille tel que [MetaMask](https://metamask.io/).
-   - Pour transférer l'ETH vers votre portefeuille, cliquez sur le bouton « Comptes » en haut à droite de la page.
-   - Cliquez sur le bouton « Envoyer » à côté du compte ETH.
-   - Entrez le montant d'ETH que vous souhaitez envoyer et l'adresse du portefeuille vers lequel vous souhaitez l'envoyer.
-   - Assurez-vous que vous envoyez à votre adresse de portefeuille Ethereum sur Arbitrum One.
-   - Cliquez sur "Continuer" et confirmez votre transaction.
-
-Vous pouvez en savoir plus sur l'obtention d'ETH sur Coinbase [ici](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency).
+1. Go to [Coinbase](https://www.coinbase.com/) and create an account.
+2. Une fois que vous avez créé un compte, vérifiez votre identité via un processus appelé KYC (ou Know Your Customer). Il s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires.
+3. Une fois que vous avez vérifié votre identité, achetez de l'ETH en cliquant sur le bouton « Acheter/Vendre » en haut à droite de la page.
+4. Choisissez la devise que vous souhaitez acheter. Sélectionnez ETH.
+5. Sélectionnez votre mode de paiement préféré.
+6. Entrez le montant d'ETH que vous souhaitez acheter.
+7. Vérifiez votre achat et cliquez sur « Acheter de l'ETH ».
+8. Confirmez votre achat et vous aurez acheté avec succès de l'ETH.
+9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). 
+   - Pour transférer l'ETH vers votre portefeuille, cliquez sur le bouton « Comptes » en haut à droite de la page.
+   - Cliquez sur le bouton « Envoyer » à côté du compte ETH.
+   - Entrez le montant d'ETH que vous souhaitez envoyer et l'adresse du portefeuille vers lequel vous souhaitez l'envoyer.
+   - Assurez-vous que vous envoyez à votre adresse de portefeuille Ethereum sur Arbitrum One.
+   - Cliquez sur « Continuer » et confirmez votre transaction.
+
+You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency).

### Binance

Ce sera un guide étape par étape pour acheter des ETH sur Binance.

-1. Accédez à [Binance](https://www.binance.com/en) et créez un compte.
-2. Une fois que vous avez créé un compte, vérifiez votre identité via un processus appelé KYC (ou Know Your Customer). Il s’agit d’une procédure standard pour tous les échanges cryptographiques centralisés ou dépositaires.
-3. Une fois que vous avez vérifié votre identité, achetez des ETH en cliquant sur le bouton « Acheter maintenant » sur la bannière de la page d'accueil.
-4. Sélectionnez la devise que vous souhaitez acheter. Sélectionnez ETH.
-5. Sélectionnez votre mode de paiement préféré.
-6. Entrez le montant d'ETH que vous souhaitez acheter.
-7. Vérifiez votre achat et cliquez sur « Acheter ETH ».
-8. Confirmez votre achat et vous verrez votre ETH dans votre portefeuille Binance Spot.
-9. Vous pouvez retirer l'ETH de votre compte vers votre portefeuille tel que [MetaMask](https://metamask.io/).
-   - Pour retirer l'ETH vers votre portefeuille, ajoutez l'adresse de votre portefeuille à la liste blanche de retrait.
-   - Cliquez sur le bouton « portefeuille », cliquez sur retirer et sélectionnez ETH.
-   - Entrez le montant d'ETH que vous souhaitez envoyer et l'adresse du portefeuille sur liste blanche à laquelle vous souhaitez l'envoyer.
-   - Assurez-vous que vous envoyez à votre adresse de portefeuille Ethereum sur Arbitrum One.
-   - Cliquez sur "Continuer" et confirmez votre transaction.
-
-Vous pouvez en savoir plus sur l'obtention d'ETH sur Binance [ici](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582).
+1. Go to [Binance](https://www.binance.com/en) and create an account.
+2. Une fois que vous avez créé un compte, vérifiez votre identité via un processus appelé KYC (ou Know Your Customer). Il s'agit d'une procédure standard pour toutes les plateformes d'échange de crypto-monnaies centralisées ou dépositaires.
+3. Une fois que vous avez vérifié votre identité, achetez des ETH en cliquant sur le bouton « Acheter maintenant » sur la bannière de la page d'accueil.
+4. Choisissez la devise que vous souhaitez acheter. Sélectionnez ETH.
+5. Sélectionnez votre mode de paiement préféré.
+6. Entrez le montant d'ETH que vous souhaitez acheter.
+7. Vérifiez votre achat et cliquez sur « Acheter de l'ETH ».
+8. Confirmez votre achat et vous verrez votre ETH dans votre portefeuille Binance Spot.
+9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/).
+   - Pour retirer l'ETH vers votre portefeuille, ajoutez l'adresse de votre portefeuille à la liste blanche de retrait.
+   - Cliquez sur le bouton « portefeuille », cliquez sur retirer et sélectionnez ETH.
+   - Entrez le montant d'ETH que vous souhaitez envoyer et l'adresse du portefeuille sur liste blanche à laquelle vous souhaitez l'envoyer. 
+   - Assurez-vous que vous envoyez à votre adresse de portefeuille Ethereum sur Arbitrum One.
+   - Cliquez sur « Continuer » et confirmez votre transaction.
+
+You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582).

## FAQ sur la facturation

@@ -202,11 +203,11 @@ Vous n'avez pas besoin de savoir à l'avance combien de requêtes vous aurez bes

Nous vous recommandons de surestimer le nombre de requêtes dont vous aurez besoin afin de ne pas avoir à recharger votre solde fréquemment. Pour les applications de petite et moyenne taille, une bonne estimation consiste à commencer par 1 à 2 millions de requêtes par mois et à surveiller de près l'utilisation au cours des premières semaines. Pour les applications plus grandes, une bonne estimation consiste à utiliser le nombre de visites quotidiennes que reçoit votre site multiplié par le nombre de requêtes que votre page la plus active effectue à son ouverture.

-Bien entendu, les nouveaux utilisateurs et les utilisateurs existants peuvent contacter l'équipe BD d'Edge & ; Node pour une consultation afin d'en savoir plus sur l'utilisation prévue.
+Of course, both new and existing users can reach out to Edge & Node's BD team for a consult to learn more about anticipated usage.

### Puis-je retirer du GRT de mon solde de facturation ?

-Oui, vous pouvez toujours retirer les GRT qui n'ont pas déjà été utilisés pour des requêtes de votre solde de facturation. Le contrat de facturation est uniquement conçu pour transférer des GRT de l'Ethereum Mainnet vers le réseau Arbitrum. Si vous souhaitez transférer vos GRT d'Arbitrum vers le réseau principal Ethereum, vous devrez utiliser le [pont Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161).
+Yes, you can always withdraw GRT that has not already been used for queries from your billing balance. The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161).

### Que se passe-t-il lorsque mon solde de facturation est épuisé ? Vais-je recevoir un avertissement ?

From bf837b35de2e4986ab743659ad71a82395a1a877 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:53:33 -0500
Subject: [PATCH 0523/1534] New translations billing.mdx (Spanish)

---
 website/src/pages/es/subgraphs/billing.mdx | 203 +++++++++++----------
 1 file changed, 102 insertions(+), 101 deletions(-)

diff --git a/website/src/pages/es/subgraphs/billing.mdx b/website/src/pages/es/subgraphs/billing.mdx
index 9a53e25ff7ee..b2210285e434 100644
--- a/website/src/pages/es/subgraphs/billing.mdx
+++ b/website/src/pages/es/subgraphs/billing.mdx
@@ -2,106 +2,107 @@
 title: Facturación
---

-## Subgraph Billing Plans
+## Planes de consultas

-There are two plans to use when querying subgraphs on The Graph Network.
+Existen dos planes para usar al consultar subgrafos en The Graph Network.

-- **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp.
+- **Plan Gratuito**: El Plan Gratuito incluye 100.000 consultas mensuales gratuitas con acceso completo al entorno de pruebas de Subgraph Studio. 
Este plan está diseñado para aficionados, participantes de hackatones y aquellos con proyectos paralelos que deseen probar The Graph antes de escalar su dapp.

- **Plan de Expansión**: El Plan de Expansión incluye todo lo que ofrece el Plan Gratuito, pero todas las consultas que excedan las 100.000 consultas mensuales requieren pagos con GRT o tarjeta de crédito. El Plan de Expansión es lo suficientemente flexible como para cubrir las necesidades de equipos con dapps consolidadas en una variedad de casos de uso.

## Pagos de consultas con tarjeta de crédito

- Para configurar la facturación con tarjetas de crédito/débito, los usuarios deben acceder a Subgraph Studio (https://thegraph.com/studio/)
  1. Ve a la [página de facturación de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/).
  2. Haz clic en el botón "Conectar wallet" en la esquina superior derecha de la página. Serás redirigido a la página de selección de wallet. Selecciona tu wallet y haz clic en "Conectar".
  3. Elige "Mejorar plan" si estás actualizando desde el Plan Gratuito, o selecciona "Gestionar plan" si ya has agregado GRT a tu saldo de facturación anteriormente. A continuación, puedes estimar la cantidad de consultas para obtener una estimación de precios, aunque este paso no es obligatorio.
  4. Para elegir el pago con tarjeta de crédito, selecciona "Tarjeta de crédito" como método de pago y completa la información de tu tarjeta. Aquellos que hayan utilizado Stripe anteriormente pueden usar la función Link para completar sus datos automáticamente.
- Las facturas se procesarán al final de cada mes y requieren una tarjeta de crédito activa registrada para todas las consultas que excedan la cuota del plan gratuito.

## Pago de consultas con GRT

Los usuarios de subgrafos pueden usar The Graph Token (GRT) para pagar las consultas en The Graph Network. 
Con GRT, las facturas se procesarán al final de cada mes y será necesario contar con un saldo suficiente de GRT para realizar consultas que excedan la cuota del Plan Gratuito de 100.000 consultas mensuales. También deberás cubrir las tarifas generadas por tus claves de API. A través del contrato de facturación, podrás: - Agregar y retirar GRT de tu saldo de cuenta. - Llevar un registro de tus saldos en función de la cantidad de GRT que hayas agregado a tu saldo de cuenta, la cantidad que hayas retirado y tus facturas. - Paga automáticamente las facturas basadas en las tarifas de consulta generadas, siempre y cuando haya suficiente GRT en tu saldo de cuenta. -### GRT on Arbitrum or Ethereum +### GRT en Arbitrum o Ethereum -The Graph’s billing system accepts GRT on Arbitrum, and users will need ETH on Arbitrum to pay their gas. While The Graph protocol started on Ethereum Mainnet, all activity, including the billing contracts, is now on Arbitrum One. +El sistema de facturación de The Graph acepta GRT en Arbitrum, y los usuarios necesitarán ETH en Arbitrum para pagar el gas. Aunque el protocolo de The Graph comenzó en Ethereum Mainnet, toda la actividad, incluidos los contratos de facturación, ahora se realiza en Arbitrum One. -To pay for queries, you need GRT on Arbitrum. Here are a few different ways to achieve this: +Para pagar consultas, necesitas GRT en Arbitrum. Aquí tienes algunas formas de obtenerlo: -- If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: +- Si ya tienes GRT en Ethereum, puedes enviarlo a Arbitrum mediante un puente. Puedes hacerlo a través de la opción de transferencia de GRT proporcionada en Subgraph Studio o utilizando uno de los siguientes puentes: + +- [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) -- [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) -- If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. +- Si ya tienes activos en Arbitrum, puedes intercambiarlos por GRT a través de un protocolo de intercambio como Uniswap. -- Alternatively, you acquire GRT directly on Arbitrum through a decentralized exchange. +- Alternativamente, puedes adquirir GRT directamente en Arbitrum a través de un intercambio descentralizado. -> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +> Esta sección está redactada asumiendo que ya tienes GRT en tu wallet y que estás en Arbitrum. Si no tienes GRT, puedes aprender cómo obtenerlo [aquí](#getting-grt). -Once you bridge GRT, you can add it to your billing balance. +Una vez que transfieras GRT, puedes agregarlo a tu saldo de facturación. -### Adding GRT using a wallet +### Agregar GRT utilizando una wallet -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). +1. Ve a la [página de facturación de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). 2. Haz clic en el botón "Conectar wallet" en la esquina superior derecha de la página. Serás redirigido a la página de selección de wallet. Selecciona tu wallet y haz clic en "Conectar". -3. Select the "Manage" button near the top right corner. First time users will see an option to "Upgrade to Growth plan" while returning users will click "Deposit from wallet". -4. 
Use the slider to estimate the number of queries you expect to make on a monthly basis. - - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. -5. Choose "Cryptocurrency". GRT is currently the only cryptocurrency accepted on The Graph Network. -6. Select the number of months you would like to prepay. - - Paying in advance does not commit you to future usage. You will only be charged for what you use and you can withdraw your balance at any time. +3. Selecciona el botón "Gestionar" en la esquina superior derecha. Los usuarios que acceden por primera vez verán la opción "Mejorar al Plan de Expansión", mientras que los usuarios recurrentes deberán hacer clic en "Depositar desde la wallet". +4. Utiliza la barra deslizante para estimar la cantidad de consultas que esperas realizar mensualmente. + - Para obtener sugerencias sobre la cantidad de consultas que podrías usar, consulta nuestra página de Preguntas Frecuentes. +5. Selecciona "Cripto". Actualmente, GRT es la única criptomoneda aceptada en The Graph Network. +6. Selecciona la cantidad de meses que deseas pagar por adelantado. + - Pagar por adelantado no te compromete a un uso futuro. Solo se te cobrará por lo que utilices, y puedes retirar tu saldo en cualquier momento. 7. Pick the network from which you are depositing your GRT. GRT on Arbitrum or Ethereum are both acceptable. -8. Click "Allow GRT Access" and then specify the amount of GRT that can be taken from you wallet. - - If you are prepaying for multiple months, you must allow access to the amount that corresponds with that amount. This interaction will not cost any gas. -9. Lastly, click on "Add GRT to Billing Balance". This transaction will require ETH on Arbitrum to cover the gas costs. +8. Haz clic en "Permitir acceso a GRT" y luego especifica la cantidad de GRT que se puede tomar de tu wallet. + - Si estás pagando por adelantado varios meses, debes permitirle acceso a la cantidad que corresponde con ese monto. Esta interacción no tendrá costo de gas. +9. Por último, haz clic en "Agregar GRT al saldo de facturación". Esta transacción requerirá ETH en Arbitrum para cubrir los costos de gas. -- Note that GRT deposited from Arbitrum will process within a few moments while GRT deposited from Ethereum will take approximately 15-20 minutes to process. Once the transaction is confirmed, you'll see the GRT added to your account balance. +- Ten en cuenta que los depósitos de GRT desde Arbitrum se procesarán en apenas un momento, mientras que los depósitos de GRT desde Ethereum tardarán aproximadamente 15-20 minutos en procesarse. Una vez que la transacción sea confirmada, verás el GRT agregado a tu saldo de cuenta. -### Withdrawing GRT using a wallet +### Retirar GRT utilizando una wallet -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". -3. Click the "Manage" button at the top right corner of the page. Select "Withdraw GRT". A side panel will appear. -4. Enter the amount of GRT you would like to withdraw. -5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. The GRT will be sent to your Arbitrum wallet. -6. Once the transaction is confirmed, you'll see the GRT withdrawn from your account balance in your Arbitrum wallet. +1. 
Ve a la [página de facturación de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +2. Haz clic en el botón "Conectar Wallet" en la esquina superior derecha de la página. Selecciona tu wallet y luego haz clic en "Conectar". +3. Haz clic en el botón "Administrar" en la esquina superior derecha de la página. Selecciona "Retirar GRT". Aparecerá un panel lateral. +4. Ingresa la cantidad de GRT que deseas retirar. +5. Haz clic en "Retirar GRT" para retirar el GRT de tu saldo de cuenta. Firma la transacción asociada en tu wallet. Esto tendrá un costo de gas. El GRT se enviará a tu wallet en Arbitrum. +6. Una vez que la transacción sea confirmada, verás el GRT retirado de tu saldo de cuenta en tu wallet de Arbitrum. ### Añadir GRT utilizando una wallet multisig -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. -3. Select the "Manage" button near the top right corner. First time users will see an option to "Upgrade to Growth plan" while returning users will click "Deposit from wallet". -4. Use the slider to estimate the number of queries you expect to make on a monthly basis. - - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. -5. Choose "Cryptocurrency". GRT is currently the only cryptocurrency accepted on The Graph Network. -6. Select the number of months you would like to prepay. - - Paying in advance does not commit you to future usage. You will only be charged for what you use and you can withdraw your balance at any time. -7. Pick the network from which you are depositing your GRT. GRT on Arbitrum or Ethereum are both acceptable. 8. Click "Allow GRT Access" and then specify the amount of GRT that can be taken from you wallet. - - If you are prepaying for multiple months, you must allow access to the amount that corresponds with that amount. This interaction will not cost any gas. -8. Lastly, click on "Add GRT to Billing Balance". This transaction will require ETH on Arbitrum to cover the gas costs. +1. Ve a la [página de facturación de Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +2. Haz clic en el botón "Conectar Wallet" en la esquina superior derecha de la página. Selecciona tu wallet y haz clic en "Conectar". Si estás utilizando [Gnosis-Safe](https://gnosis-safe.io/), podrás conectar tanto tu multisig como tu wallet firmante. Luego, firma el mensaje asociado. Esto no tendrá ningún costo de gas. +3. Selecciona el botón "Gestionar" en la esquina superior derecha. Los usuarios que acceden por primera vez verán la opción "Mejorar al Plan de Expansión", mientras que los usuarios recurrentes deberán hacer clic en "Depositar desde la wallet". +4. Utiliza la barra deslizante para estimar la cantidad de consultas que esperas realizar mensualmente. + - Para obtener sugerencias sobre la cantidad de consultas que podrías usar, consulta nuestra página de Preguntas Frecuentes. +5. Selecciona "Cripto". Actualmente, GRT es la única criptomoneda aceptada en The Graph Network. +6. Selecciona la cantidad de meses que deseas pagar por adelantado. + - Pagar por adelantado no te compromete a un uso futuro. 
Solo se te cobrará por lo que utilices, y puedes retirar tu saldo en cualquier momento. +7. Selecciona la red desde la cual estás depositando tu GRT. Tanto GRT en Arbitrum como en Ethereum son aceptables. 8. Haz clic en "Permitir acceso a GRT" y luego especifica la cantidad de GRT que se puede tomar de tu billetera. + - Si estás pagando por adelantado varios meses, debes permitirle acceso a la cantidad que corresponde con ese monto. Esta interacción no tendrá costo de gas. +8. Por último, haz clic en "Agregar GRT al saldo de facturación". Esta transacción requerirá ETH en Arbitrum para cubrir los costos de gas. -- Note that GRT deposited from Arbitrum will process within a few moments while GRT deposited from Ethereum will take approximately 15-20 minutes to process. Once the transaction is confirmed, you'll see the GRT added to your account balance. +- Ten en cuenta que los depósitos de GRT desde Arbitrum se procesarán en apenas un momento, mientras que los depósitos de GRT desde Ethereum tardarán aproximadamente 15-20 minutos en procesarse. Una vez que la transacción sea confirmada, verás el GRT agregado a tu saldo de cuenta. -## Getting GRT +## Obtener GRT -This section will show you how to get GRT to pay for query fees. +Esta sección te mostrará cómo obtener GRT para pagar las tarifas de consulta. ### Coinbase -This will be a step by step guide for purchasing GRT on Coinbase. +Esta será una guía paso a paso para comprar GRT en Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select GRT. +1. Ve a [Coinbase](https://www.coinbase.com/) y crea una cuenta. +2. Una vez que hayas creado una cuenta, necesitarás verificar tu identidad a través de un proceso conocido como KYC (o Conoce a tu Cliente). Este es un procedimiento estándar para todos los intercambios de criptomonedas centralizados o con custodia de activos. +3. Una vez que hayas verificado tu identidad, puedes comprar GRT. Para hacerlo, haz clic en el botón "Comprar/Vender" en la parte superior derecha de la página. +4. Selecciona la moneda que deseas comprar. Selecciona GRT. 5. Select the payment method. Select your preferred payment method. 6. Select the amount of GRT you want to purchase. 7. Review your purchase. Review your purchase and click "Buy GRT". @@ -119,7 +120,7 @@ You can learn more about getting GRT on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing GRT on Binance. 1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +2. Una vez que hayas creado una cuenta, necesitarás verificar tu identidad a través de un proceso conocido como KYC (o Conoce a tu Cliente). Este es un procedimiento estándar para todos los intercambios de criptomonedas centralizados o con custodia de activos. 3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy Now" button on the homepage banner. 4. 
You will be taken to a page where you can select the currency you want to purchase. Select GRT. 5. Select your preferred payment method. You'll be able to pay with different fiat currencies such as Euros, US Dollars, and more. @@ -156,58 +157,58 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. - -You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +1. Ve a [Coinbase](https://www.coinbase.com/) y crea una cuenta. +2. Una vez que hayas creado una cuenta, verifica tu identidad a través de un proceso conocido como KYC (o Conoce a tu Cliente). Este es un procedimiento estándar para todos los exchanges centralizados o que mantienen custodia de criptomonedas. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Selecciona la moneda que deseas comprar. Elige ETH. +5. Selecciona tu método de pago preferido. +6. Ingresa la cantidad de ETH que deseas comprar. +7. Revisa tu compra y haz clic en "Comprar ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Haz clic en el botón "Enviar" junto a la cuenta de ETH. + - Ingresa la cantidad de ETH que deseas enviar y la dirección de la wallet a la que quieres enviarlo. + - Asegúrate de que estás enviando a la dirección de tu wallet de Ethereum en Arbitrum One. + - Haz clic en "Continuar" y confirma tu transacción. + +Puedes obtener más información sobre cómo adquirir ETH en Coinbase [aquí](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance -This will be a step by step guide for purchasing ETH on Binance. +Esta será una guía paso a paso para comprar ETH en Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). 
This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Una vez que hayas creado una cuenta, verifica tu identidad a través de un proceso conocido como KYC (o Conoce a tu Cliente). Este es un procedimiento estándar para todos los exchanges centralizados o que mantienen custodia de criptomonedas. +3. Una vez que hayas verificado tu identidad, compra ETH haciendo clic en el botón "Comprar ahora" en el banner de la página de inicio. +4. Selecciona la moneda que deseas comprar. Elige ETH. +5. Selecciona tu método de pago preferido. +6. Ingresa la cantidad de ETH que deseas comprar. +7. Revisa tu compra y haz clic en "Comprar ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - Para retirar el ETH a tu wallet, añade la dirección de tu wallet a la lista de direcciones autorizadas para retiros. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Asegúrate de que estás enviando a la dirección de tu wallet de Ethereum en Arbitrum One. + - Haz clic en "Continuar" y confirma tu transacción. -You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +Puedes obtener más información sobre cómo adquirir ETH en Binance [aquí](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). -## Billing FAQs +## Preguntas Frecuentes sobre Facturación -### How many queries will I need? +### ¿Cuántas consultas necesitaré? -You don't need to know how many queries you'll need in advance. You will only be charged for what you use and you can withdraw GRT from your account at any time. +No necesitas saber cuántas consultas necesitarás de antemano. Solo se te cobrará por lo que uses y podrás retirar GRT de tu cuenta en cualquier momento. -We recommend you overestimate the number of queries you will need so that you don’t have to top up your balance frequently. A good estimate for small to medium sized applications is to start with 1M-2M queries per month and monitor usage closely in the first weeks. For larger apps, a good estimate is to use the number of daily visits your site gets multiplied by the number of queries your most active page makes upon opening. 
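As a rough worked example of the heuristic above (all figures are illustrative placeholders, not benchmarks), the arithmetic is simply daily visits times the queries fired by the busiest page, over roughly 30 days:

```typescript
// Hypothetical traffic figures -- replace with your own site's numbers.
const dailyVisits = 10_000; // visits per day
const queriesPerVisit = 5; // queries the most active page fires on open
const monthlyQueries = dailyVisits * queriesPerVisit * 30;
console.log(monthlyQueries.toLocaleString()); // "1,500,000" -- about 1.5M queries/month
```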
+Te recomendamos sobreestimar la cantidad de consultas que necesitarás para no tener que recargar tu saldo con frecuencia. Una buena estimación para aplicaciones pequeñas a medianas es comenzar con 1M-2M de consultas por mes y monitorear el uso de cerca en las primeras semanas. Para aplicaciones más grandes, una buena estimación es usar el número de visitas diarias que recibe tu sitio multiplicado por el número de consultas que hace la página más activa al abrirse. -Of course, both new and existing users can reach out to Edge & Node's BD team for a consult to learn more about anticipated usage. +Por supuesto, tanto los usuarios nuevos como los existentes pueden ponerse en contacto con el equipo de BD de Edge & Node para una consulta y aprender más sobre el uso anticipado. -### Can I withdraw GRT from my billing balance? +### ¿Puedo retirar GRT de mi saldo de facturación? -Yes, you can always withdraw GRT that has not already been used for queries from your billing balance. The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +Sí, siempre puedes retirar GRT que no haya sido utilizado para consultas de tu saldo de facturación. El contrato de facturación está diseñado únicamente para transferir GRT de la red principal de Ethereum a la red de Arbitrum. Si deseas transferir tu GRT de Arbitrum de vuelta a la red principal de Ethereum, necesitarás utilizar el [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). -### What happens when my billing balance runs out? Will I get a warning? +### ¿Qué sucede cuando se agota mi saldo de facturación? ¿Recibiré una advertencia? -You will receive several email notifications before your billing balance runs out. +Recibirás varias notificaciones por correo electrónico antes de que se agote tu saldo de facturación. From a94682c199a0aeed4d58238009dccd4c9e7ab157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:33 -0500 Subject: [PATCH 0524/1534] New translations billing.mdx (Arabic) --- website/src/pages/ar/subgraphs/billing.mdx | 61 +++++++++++----------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/website/src/pages/ar/subgraphs/billing.mdx b/website/src/pages/ar/subgraphs/billing.mdx index 5ac09c854a9f..e5b5deb5c4ef 100644 --- a/website/src/pages/ar/subgraphs/billing.mdx +++ b/website/src/pages/ar/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: الفوترة --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,7 +38,8 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) -- [الانتقال](https://transferto.xyz/swap) + +- [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. 
Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. 
- - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). From 4fd4a14284020bdb04ec6905a3ecb10505f3f431 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:34 -0500 Subject: [PATCH 0525/1534] New translations billing.mdx (Czech) --- website/src/pages/cs/subgraphs/billing.mdx | 67 +++++++++++----------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/website/src/pages/cs/subgraphs/billing.mdx b/website/src/pages/cs/subgraphs/billing.mdx index e9199473c24c..4118bf1d451a 100644 --- a/website/src/pages/cs/subgraphs/billing.mdx +++ b/website/src/pages/cs/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Fakturace --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - Pokud již máte GRT na platformě Ethereum, můžete ji přemostit na Arbitrum. Můžete tak učinit pomocí možnosti přemostění GRT, která je k dispozici v podgraf Studio, nebo pomocí jednoho z následujících přemostění: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,43 +157,43 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. 
Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. - -Více informací o získání ETH na Coinbase se dozvíte [zde](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Toto bude průvodce krok za krokem pro nákup ETH na Binance. -1. Přejděte na [Binance](https://www.binance.com/en) a vytvořte si účet. -2. Jakmile si vytvoříte účet, ověřte svou totožnost prostřednictvím procesu známého jako KYC (neboli Know Your Customer). Jedná se o standardní postup u všech centralizovaných nebo depozitních kryptoburz. -3. Jakmile ověříte svou totožnost, zakupte ETH kliknutím na tlačítko "Koupit nyní" na banneru domovské stránky. -4. Vyberte měnu, kterou chcete zakoupit. Vyberte ETH. -5. Vyberte preferovaný způsob platby. -6. Zadejte částku ETH, kterou chcete koupit. -7. Zkontrolujte svůj nákup a klikněte na tlačítko "Koupit ETH". -8. Potvrďte nákup a v peněžence Binance Spot se vám zobrazí ETH. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Klikněte na tlačítko "peněženka", klikněte na vybrat a vyberte ETH. - - Zadejte částku ETH, kterou chcete poslat, a adresu peněženky, na kterou ji chcete poslat. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Klikněte na tlačítko "Pokračovat" a potvrďte transakci. - -Více informací o získání ETH na Binance se dozvíte [zde](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). 
+1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Jakmile ověříte svou totožnost, zakupte ETH kliknutím na tlačítko "Koupit nyní" na banneru domovské stránky. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Potvrďte nákup a v peněžence Binance Spot se vám zobrazí ETH. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Klikněte na tlačítko "peněženka", klikněte na vybrat a vyberte ETH. + - Zadejte částku ETH, kterou chcete poslat, a adresu peněženky, na kterou ji chcete poslat. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Billing FAQs From bc77233830101d6329d061046d16b4f429ad210a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:35 -0500 Subject: [PATCH 0526/1534] New translations billing.mdx (German) --- website/src/pages/de/subgraphs/billing.mdx | 115 +++++++++++---------- 1 file changed, 58 insertions(+), 57 deletions(-) diff --git a/website/src/pages/de/subgraphs/billing.mdx b/website/src/pages/de/subgraphs/billing.mdx index 25caec6a8310..7014ebf64d61 100644 --- a/website/src/pages/de/subgraphs/billing.mdx +++ b/website/src/pages/de/subgraphs/billing.mdx @@ -2,20 +2,20 @@ title: Billing --- -## Subgraph Abrechnungspläne +## Querying Plans Es gibt zwei Pläne für die Abfrage von Subgraphen in The Graph Network. -- **Free Plan**: Der Free Plan beinhaltet 100.000 kostenlose monatliche Abfragen mit vollem Zugriff auf die Subgraph Studio Testumgebung. Dieser Plan ist für Hobbyisten, Hackathon-Teilnehmer und diejenigen mit Nebenprojekten gedacht, die The Graph ausprobieren möchten, bevor sie ihre Dapp skalieren. +- **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. -- **Growth Plan**: Der Growth Plan beinhaltet alles, was im Free Plan enthalten ist, wobei alle Abfragen nach 100.000 monatlichen Abfragen eine Zahlung mit GRT oder Kreditkarte erfordern. Der Growth Plan ist flexibel genug, um Teams abzudecken, die Dapps für eine Vielzahl von Anwendungsfällen entwickelt haben. +- **Growth Plan**: The Growth Plan includes everything in the Free Plan with all queries after 100,000 monthly queries requiring payments with GRT or credit card. The Growth Plan is flexible enough to cover teams that have established dapps across a variety of use cases. ## Abfrage Zahlungen mit Kreditkarte - Um die Abrechnung mit Kredit-/Debitkarten einzurichten, müssen die Benutzer Subgraph Studio (https://thegraph.com/studio/) aufrufen - 1. Rufen Sie die [Subgraph Studio Abrechnungsseite](https://thegraph.com/studio/subgraphs/billing/) auf. + 1. 
Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". 3. Wählen Sie „ Upgrade Plan“, wenn Sie vom Free Plan upgraden oder wählen Sie „Manage Plan“, wenn Sie GRT bereits in der Vergangenheit zu Ihrem Abrechnungssaldo hinzugefügt haben. Als Nächstes können Sie die Anzahl der Abfragen schätzen, um einen Kostenvoranschlag zu erhalten, dieser Schritt ist jedoch nicht erforderlich. 4. Um eine Zahlung per Kreditkarte zu wählen, wählen Sie „Kreditkarte“ als Zahlungsmethode und geben Sie Ihre Kreditkartendaten ein. Diejenigen, die Stripe bereits verwendet haben, können die Funktion „Link“ verwenden, um ihre Daten automatisch auszufüllen. @@ -37,24 +37,25 @@ Um für Abfragen zu bezahlen, brauchen Sie GRT auf Arbitrum. Hier sind ein paar - Wenn Sie bereits GRT auf Ethereum haben, können Sie es zu Arbitrum überbrücken. Sie können dieses über GRT-Bridging-Option in Subgraph Studio tun oder eine der folgenden Bridges verwenden: -- [Die Arbitrum Brücke](https://bridge.arbitrum.io/?l2ChainId=42161) -- [Übertragen auf](https://transferto.xyz/swap) +- [Die Arbitrum-Brücke] (https://bridge.arbitrum.io/?l2ChainId=42161) + +- [TransferTo](https://transferto.xyz/swap) - Wenn du bereits Assets auf Arbitrum hast, kannst du sie über ein Swapping-Protokoll wie Uniswap in GRT tauschen. - Alternativ können Sie GRT auch direkt auf Arbitrum über einen dezentralen Handelsplatz erwerben. -> In diesem Abschnitt wird davon ausgegangen, dass du bereits GRT in deiner Wallet hast und auf Arbitrum bist. Wenn Sie keine GRT haben, können Sie erfahren, wie Sie GRT [hier](#getting-grt) bekommen. +> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). Sobald Sie GRT überbrücken, können Sie es zu Ihrem Rechnungssaldo hinzufügen. ### Hinzufügen von GRT mit einer Wallet -1. Rufen Sie die [Subgraph Studio Abrechnungsseite](https://thegraph.com/studio/subgraphs/billing/) auf. +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". 3. Wählen Sie die Schaltfläche „ Manage “ in der oberen rechten Ecke. Erstmalige Nutzer sehen die Option „Upgrade auf den Wachstumsplan“, während wiederkehrende Nutzer auf „Von der Wallet einzahlen“ klicken. 4. Verwenden Sie den Slider, um die Anzahl der Abfragen zu schätzen, die Sie monatlich erwarten. - - Vorschläge für die Anzahl der Abfragen, die Sie verwenden können, finden Sie auf unserer Seite **Häufig gestellte Fragen**. + - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. 5. Wählen Sie „Kryptowährung“. GRT ist derzeit die einzige Kryptowährung, die im The Graph Network akzeptiert wird. 6. Wählen Sie die Anzahl der Monate, die Sie im Voraus bezahlen möchten. - Die Zahlung im Voraus verpflichtet Sie nicht zu einer zukünftigen Nutzung. Ihnen wird nur das berechnet, was Sie verbrauchen, und Sie können Ihr Guthaben jederzeit abheben. @@ -67,7 +68,7 @@ Sobald Sie GRT überbrücken, können Sie es zu Ihrem Rechnungssaldo hinzufügen ### GRT über eine Wallet abheben -1. 
Rufen Sie die [Subgraph Studio Abrechnungsseite](https://thegraph.com/studio/subgraphs/billing/) auf. +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Klicken Sie auf die Schaltfläche „Connect Wallet“ in der oberen rechten Ecke der Seite. Wählen Sie Ihre Wallet aus und klicken Sie auf „Verbinden“. 3. Klicken Sie auf die Schaltfläche „Verwalten“ in der oberen rechten Ecke der Seite. Wählen Sie „GRT abheben“. Ein Seitenfenster wird angezeigt. 4. Geben Sie den Betrag der GRT ein, den Sie abheben möchten. @@ -76,11 +77,11 @@ Sobald Sie GRT überbrücken, können Sie es zu Ihrem Rechnungssaldo hinzufügen ### Adding GRT using a multisig wallet -1. Rufen Sie die [Subgraph Studio Abrechnungsseite](https://thegraph.com/studio/subgraphs/billing/) auf. -2. Klicke auf die Schaltfläche „Wallet verbinden“ in der oberen rechten Ecke der Seite. Wähle deine Wallet aus und klicke auf „Verbinden“. Wenn du die [Gnosis-Safe](https://gnosis-safe.io/) verwendest, kannst du sowohl deine Multisig-Wallet als auch deine Signatur-Wallet verbinden. Anschließend unterschreibe die zugehörige Nachricht. Dies verursacht keine Gasgebühren. +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). +2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. 3. Wählen Sie die Schaltfläche „ Manage “ in der oberen rechten Ecke. Erstmalige Nutzer sehen die Option „Upgrade auf den Wachstumsplan“, während wiederkehrende Nutzer auf „Von der Wallet einzahlen“ klicken. 4. Verwenden Sie den Slider, um die Anzahl der Abfragen zu schätzen, die Sie monatlich erwarten. - - Vorschläge für die Anzahl der Abfragen, die Sie verwenden können, finden Sie auf unserer Seite **Häufig gestellte Fragen**. + - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. 5. Wählen Sie „Kryptowährung“. GRT ist derzeit die einzige Kryptowährung, die im The Graph Network akzeptiert wird. 6. Wählen Sie die Anzahl der Monate, die Sie im Voraus bezahlen möchten. - Die Zahlung im Voraus verpflichtet Sie nicht zu einer zukünftigen Nutzung. Ihnen wird nur das berechnet, was Sie verbrauchen, und Sie können Ihr Guthaben jederzeit abheben. @@ -98,7 +99,7 @@ In diesem Abschnitt erfahren Sie, wie Sie GRT dazu bringen können, die Abfrageg Dies ist eine Schritt-für-Schritt-Anleitung für den Kauf von GRT auf Coinbase. -1. Gehen Sie zu [Coinbase](https://www.coinbase.com/) und erstellen Sie ein Konto. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. 2. Sobald Sie ein Konto erstellt haben, müssen Sie Ihre Identität durch ein Verfahren verifizieren, das als KYC (oder Know Your Customer) bekannt ist. Dies ist ein Standardverfahren für alle zentralisierten oder verwahrten Krypto-Börsen. 3. Sobald Sie Ihre Identität überprüft haben, können Sie GRT kaufen. Dazu klicken Sie auf die Schaltfläche „Kaufen/Verkaufen“ oben rechts auf der Seite. 4. Wählen Sie die Währung, die Sie kaufen möchten. Wählen Sie GRT. @@ -106,19 +107,19 @@ Dies ist eine Schritt-für-Schritt-Anleitung für den Kauf von GRT auf Coinbase. 6. Wählen Sie die Menge an GRT, die Sie kaufen möchten. 7. Überprüfen Sie Ihren Einkauf. Überprüfen Sie Ihren Einkauf und klicken Sie auf „GRT kaufen“. 8. Bestätigen Sie Ihren Kauf. 
Bestätigen Sie Ihren Kauf und Sie haben GRT erfolgreich gekauft. -9. Sie können die GRT von Ihrem Konto auf Ihre Wallet wie [MetaMask](https://metamask.io/) übertragen. +9. You can transfer the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). - Um GRT auf Ihre Wallet zu übertragen, klicken Sie auf die Schaltfläche „Konten“ oben rechts auf der Seite. - Klicken Sie auf die Schaltfläche „Senden“ neben dem GRT Konto. - Geben Sie den Betrag an GRT ein, den Sie senden möchten, und die Wallet-Adresse, an die Sie ihn senden möchten. - Klicken Sie auf „Weiter“ und bestätigen Sie Ihre Transaktion. -Bitte beachten Sie, dass Coinbase Sie bei größeren Kaufbeträgen möglicherweise 7-10 Tage warten lässt, bevor Sie den vollen Betrag in eine Krypto-Wallet überweisen. -Sie können mehr über den Erwerb von GRT auf Coinbase [hier](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) erfahren. +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Dies ist eine Schritt-für-Schritt-Anleitung für den Kauf von GRT auf Binance. -1. Gehen Sie auf [Binance](https://www.binance.com/en) und erstellen Sie ein Konto. +1. Go to [Binance](https://www.binance.com/en) and create an account. 2. Sobald Sie ein Konto erstellt haben, müssen Sie Ihre Identität durch ein Verfahren verifizieren, das als KYC (oder Know Your Customer) bekannt ist. Dies ist ein Standardverfahren für alle zentralisierten oder verwahrten Krypto-Börsen. 3. Sobald Sie Ihre Identität überprüft haben, können Sie GRT kaufen. Dazu klicken Sie auf die Schaltfläche „Jetzt kaufen“ auf dem Banner der Homepage. 4. Sie werden zu einer Seite weitergeleitet, auf der Sie die Währung auswählen können, die Sie kaufen möchten. Wählen Sie GRT. @@ -126,27 +127,27 @@ Dies ist eine Schritt-für-Schritt-Anleitung für den Kauf von GRT auf Binance. 6. Wählen Sie die Menge an GRT, die Sie kaufen möchten. 7. Überprüfen Sie Ihren Kauf und klicken Sie auf „GRT kaufen“. 8. Bestätigen Sie Ihren Kauf und Sie werden Ihr GRT in Ihrer Binance Spot Wallet sehen können. -9. Sie können GRT von Ihrem Konto auf Ihre Wallet wie [MetaMask](https://metamask.io/) abheben. - - [Um GRT auf Ihr Wallet abzuheben](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570), fügen Sie die Adresse Ihres Wallets zur Whitelist für Abhebungen hinzu. +9. You can withdraw the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). + - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist. - Klicken Sie auf die Schaltfläche „Wallet“, klicken Sie auf Abheben und wählen Sie GRT. - Geben Sie den GRT-Betrag ein, den Sie senden möchten, und die Wallet-Adresse, die auf der Whitelist steht. - Klicken Sie auf „Weiter“ und bestätigen Sie Ihre Transaktion. -Sie können mehr über den Erwerb von GRT auf Binance [hier](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582) erfahren. +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). 
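After a withdrawal, you can confirm the GRT actually arrived by reading the token balance on-chain. A minimal sketch assuming ethers v6 — the RPC endpoint and example wallet address are placeholders, while the token address is the Arbitrum One GRT contract cited in the Uniswap steps below:

```ts
// Minimal sketch: verify a GRT withdrawal landed in your wallet by reading the
// ERC-20 balance on Arbitrum One with ethers v6. The RPC URL and wallet address
// are placeholders; the token address is the Arbitrum One GRT contract.
import { Contract, JsonRpcProvider, formatUnits } from "ethers";

const GRT = "0x9623063377AD1B27544C965cCd7342f7EA7e88C7"; // GRT on Arbitrum One
const ERC20_ABI = [
  "function symbol() view returns (string)",
  "function decimals() view returns (uint8)",
  "function balanceOf(address) view returns (uint256)",
];

async function checkGrtBalance(wallet: string): Promise<void> {
  const provider = new JsonRpcProvider("https://arb1.arbitrum.io/rpc");
  const token = new Contract(GRT, ERC20_ABI, provider);
  const [symbol, decimals, raw] = await Promise.all([
    token.symbol(),
    token.decimals(),
    token.balanceOf(wallet),
  ]);
  console.log(`${formatUnits(raw, decimals)} ${symbol}`); // e.g. "1000.0 GRT"
}

checkGrtBalance("0xYourWalletAddress"); // placeholder address
```

Reading `symbol()` this way also guards against interacting with the wrong contract, which the swap instructions below warn about.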
### Uniswap So können Sie GRT auf Uniswap kaufen. -1. Gehen Sie auf [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) und verbinden Sie Ihre Wallet. +1. Go to [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) and connect your wallet. 2. Wählen Sie den Token, von dem Sie tauschen möchten. Wählen Sie ETH. 3. Wählen Sie den Token, in den Sie tauschen möchten. Wählen Sie GRT. - - Stelle sicher, dass du den richtigen Token tauschst. Die Smart-Contract-Adresse von GRT auf Arbitrum One ist: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) + - Make sure you're swapping for the correct token. The GRT smart contract address on Arbitrum One is: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) 4. Geben Sie den Betrag an ETH ein, den Sie tauschen möchten. 5. Klicken Sie auf „Swap“. 6. Bestätigen Sie die Transaktion in Ihrer Wallet und warten Sie auf die Abwicklung der Transaktion. -Sie können mehr über den Erwerb von GRT auf Uniswap [hier](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-) erfahren. +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). ## Ether erhalten @@ -156,43 +157,43 @@ In diesem Abschnitt erfahren Sie, wie Sie Ether (ETH) erhalten können, um Trans Dies ist eine Schritt-für-Schritt-Anleitung für den Kauf von GRT auf Coinbase. -1. Gehen Sie zu [Coinbase](https://www.coinbase.com/) und erstellen Sie ein Konto. -2. Sobald Sie ein Konto erstellt haben, müssen Sie Ihre Identität durch ein Verfahren verifizieren, das als KYC (oder Know Your Customer) bekannt ist. Dies ist ein Standardverfahren für alle zentralisierten oder verwahrten Krypto-Börsen. -3. Sobald Sie Ihre Identität bestätigt haben, können Sie ETH kaufen, indem Sie auf die Schaltfläche „Kaufen/Verkaufen“ oben rechts auf der Seite klicken. -4. Wählen Sie die Währung, die Sie kaufen möchten. Wählen Sie ETH. -5. Wählen Sie die gewünschte Zahlungsmethode. -6. Geben Sie die Menge an ETH ein, die Sie kaufen möchten. -7. Überprüfen Sie Ihren Kauf und klicken Sie auf „ETH kaufen“. -8. Bestätigen Sie Ihren Kauf und Sie haben erfolgreich ETH gekauft. -9. Sie können die ETH von Ihrem Coinbase-Konto auf Ihr Wallet wie [MetaMask](https://metamask.io/) übertragen. - - Um die ETH auf Ihre Wallet zu übertragen, klicken Sie auf die Schaltfläche „Konten“ oben rechts auf der Seite. - - Klicken Sie auf die Schaltfläche „Senden“ neben dem ETH-Konto. - - Geben Sie den ETH-Betrag ein, den Sie senden möchten, und die Wallet-Adresse, an die Sie ihn senden möchten. - - Stellen Sie sicher, dass Sie an Ihre Ethereum Wallet Adresse auf Arbitrum One senden. - - Klicken Sie auf „Weiter“ und bestätigen Sie Ihre Transaktion. - -Sie können mehr über den Erwerb von ETH auf Coinbase [hier](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) erfahren. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Sobald Sie ein Konto erstellt haben, müssen Sie Ihre Identität durch ein Verfahren verifizieren, das als KYC (oder Know Your Customer) bekannt ist. Dies ist ein Standardverfahren für alle zentralisierten oder verwahrten Krypto-Börsen. +3. Sobald Sie Ihre Identität bestätigt haben, können Sie ETH kaufen, indem Sie auf die Schaltfläche „Kaufen/Verkaufen“ oben rechts auf der Seite klicken. +4. 
Wählen Sie die Währung, die Sie kaufen möchten. Wählen Sie ETH. +5. Wählen Sie die gewünschte Zahlungsmethode. +6. Geben Sie die Menge an ETH ein, die Sie kaufen möchten. +7. Überprüfen Sie Ihren Kauf und klicken Sie auf „ETH kaufen“. +8. Bestätigen Sie Ihren Kauf und Sie haben erfolgreich ETH gekauft. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - Um die ETH auf Ihre Wallet zu übertragen, klicken Sie auf die Schaltfläche „Konten“ oben rechts auf der Seite. + - Klicken Sie auf die Schaltfläche „Senden“ neben dem ETH-Konto. + - Geben Sie den ETH-Betrag ein, den Sie senden möchten, und die Wallet-Adresse, an die Sie ihn senden möchten. + - Stellen Sie sicher, dass Sie an Ihre Ethereum Wallet Adresse auf Arbitrum One senden. + - Klicken Sie auf „Weiter“ und bestätigen Sie Ihre Transaktion. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Dies ist eine Schritt-für-Schritt-Anleitung für den Kauf von ETH auf Binance. -1. Gehen Sie auf [Binance](https://www.binance.com/en) und erstellen Sie ein Konto. -2. Sobald Sie ein Konto erstellt haben, müssen Sie Ihre Identität durch ein Verfahren verifizieren, das als KYC (oder Know Your Customer) bekannt ist. Dies ist ein Standardverfahren für alle zentralisierten oder verwahrten Krypto-Börsen. -3. Sobald Sie Ihre Identität verifiziert haben, kaufen Sie ETH, indem Sie auf die Schaltfläche „Jetzt kaufen“ auf dem Banner der Homepage klicken. -4. Wählen Sie die Währung, die Sie kaufen möchten. Wählen Sie ETH. -5. Wählen Sie die gewünschte Zahlungsmethode. -6. Geben Sie die Menge an ETH ein, die Sie kaufen möchten. -7. Überprüfen Sie Ihren Kauf und klicken Sie auf „ETH kaufen“. -8. Bestätigen Sie Ihren Kauf und Sie werden Ihre ETH in Ihrer Binance Spot Wallet sehen. -9. Sie können die ETH von Ihrem Konto auf Ihr Wallet wie [MetaMask](https://metamask.io/) abheben. - - Um die ETH auf Ihre Wallet abzuheben, fügen Sie die Adresse Ihrer Wallet zur Abhebungs-Whitelist hinzu. - - Klicken Sie auf die Schaltfläche „Wallet“, klicken Sie auf „withdraw“ und wählen Sie ETH. - - Geben Sie den ETH-Betrag ein, den Sie senden möchten, und die Adresse der Wallet, die auf der Whitelist steht, an die Sie den Betrag senden möchten. - - Stellen Sie sicher, dass Sie an Ihre Ethereum Wallet Adresse auf Arbitrum One senden. - - Klicken Sie auf „Weiter“ und bestätigen Sie Ihre Transaktion. - -Sie können mehr über den Erwerb von ETH auf Binance [hier](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582) erfahren. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Sobald Sie ein Konto erstellt haben, müssen Sie Ihre Identität durch ein Verfahren verifizieren, das als KYC (oder Know Your Customer) bekannt ist. Dies ist ein Standardverfahren für alle zentralisierten oder verwahrten Krypto-Börsen. +3. Sobald Sie Ihre Identität verifiziert haben, kaufen Sie ETH, indem Sie auf die Schaltfläche „Jetzt kaufen“ auf dem Banner der Homepage klicken. +4. Wählen Sie die Währung, die Sie kaufen möchten. Wählen Sie ETH. +5. Wählen Sie die gewünschte Zahlungsmethode. +6. Geben Sie die Menge an ETH ein, die Sie kaufen möchten. +7. Überprüfen Sie Ihren Kauf und klicken Sie auf „ETH kaufen“. +8. Bestätigen Sie Ihren Kauf und Sie werden Ihre ETH in Ihrer Binance Spot Wallet sehen. +9. 
You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - Um die ETH auf Ihre Wallet abzuheben, fügen Sie die Adresse Ihrer Wallet zur Abhebungs-Whitelist hinzu. + - Klicken Sie auf die Schaltfläche „Wallet“, klicken Sie auf „withdraw“ und wählen Sie ETH. + - Geben Sie den ETH-Betrag ein, den Sie senden möchten, und die Adresse der Wallet, die auf der Whitelist steht, an die Sie den Betrag senden möchten. + - Stellen Sie sicher, dass Sie an Ihre Ethereum Wallet Adresse auf Arbitrum One senden. + - Klicken Sie auf „Weiter“ und bestätigen Sie Ihre Transaktion. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## FAQs zur Rechnungsstellung @@ -202,11 +203,11 @@ Sie müssen nicht im Voraus wissen, wie viele Abfragen Sie benötigen werden. Ih Wir empfehlen Ihnen, die Anzahl der Abfragen, die Sie benötigen, zu überschlagen, damit Sie Ihr Guthaben nicht häufig aufstocken müssen. Eine gute Schätzung für kleine bis mittelgroße Anwendungen ist, mit 1 Mio. bis 2 Mio. Abfragen pro Monat zu beginnen und die Nutzung in den ersten Wochen genau zu überwachen. Bei größeren Anwendungen ist es sinnvoll, die Anzahl der täglichen Besuche auf Ihrer Website mit der Anzahl der Abfragen zu multiplizieren, die Ihre aktivste Seite beim Öffnen auslöst. -Natürlich können sich sowohl neue als auch bestehende Nutzer an das BD-Team von Edge & Node wenden, um mehr über die voraussichtliche Nutzung zu erfahren. +Of course, both new and existing users can reach out to Edge & Node's BD team for a consult to learn more about anticipated usage. ### Kann ich GRT von meinem Rechnungssaldo abheben? -Ja, Sie können jederzeit GRT, die nicht bereits für Abfragen verwendet wurden, von Ihrem Abrechnungskonto abheben. Der Abrechnungsvertrag ist nur dafür gedacht, GRT aus dem Ethereum-Mainnet in das Arbitrum-Netzwerk zu übertragen. Wenn Sie Ihr GRT von Arbitrum zurück ins Ethereum-Mainnet transferieren möchten, müssen Sie den [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +Yes, you can always withdraw GRT that has not already been used for queries from your billing balance. The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). ### Was passiert, wenn mein Guthaben aufgebraucht ist? Werde ich eine Warnung erhalten? From 9f3af1cf4f3a3d88dd01524c95073c740d16d1ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:36 -0500 Subject: [PATCH 0527/1534] New translations billing.mdx (Italian) --- website/src/pages/it/subgraphs/billing.mdx | 59 +++++++++++----------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/website/src/pages/it/subgraphs/billing.mdx b/website/src/pages/it/subgraphs/billing.mdx index a88a51c3adff..c9f380bb022c 100644 --- a/website/src/pages/it/subgraphs/billing.mdx +++ b/website/src/pages/it/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Billing --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. 
You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. 
Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). From 062596a4525e7883a4dc193c743f6cc77327691d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:40 -0500 Subject: [PATCH 0528/1534] New translations billing.mdx (Japanese) --- website/src/pages/ja/subgraphs/billing.mdx | 67 +++++++++++----------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/website/src/pages/ja/subgraphs/billing.mdx b/website/src/pages/ja/subgraphs/billing.mdx index 2155969a9018..9967aa377644 100644 --- a/website/src/pages/ja/subgraphs/billing.mdx +++ b/website/src/pages/ja/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: 請求書 --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,43 +157,43 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. 
Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. - -CoinbaseでETHを入手する詳細については、[こちら](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency)をご覧いただけます。 +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance これは、BinanceでETHを購入するためのステップバイステップのガイドになります。 -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. 身元を確認したら、ホームページバナーの「Buy Now」ボタンをクリックしてETHを購入してください。 -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. 購入を確認すると、BinanceのSpotウォレットにETHが表示されます。 -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. 
- - 「ウォレット」ボタンをクリックし、その後「引き出し」をクリックし、ETHを選択してください。 - - 送金したいETHの金額と送金先のホワイトリストに登録されているウォレットアドレスを入力してください。 - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. - -BinanceでETHを入手する詳細については、[こちら](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582)をご覧いただけます。 +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. 身元を確認したら、ホームページバナーの「Buy Now」ボタンをクリックしてETHを購入してください。 +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. 購入を確認すると、BinanceのSpotウォレットにETHが表示されます。 +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - 「ウォレット」ボタンをクリックし、その後「引き出し」をクリックし、ETHを選択してください。 + - 送金したいETHの金額と送金先のホワイトリストに登録されているウォレットアドレスを入力してください。 + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Billing FAQs From 1048ae44a73c0a932f6d1d5eedaf02180af0e505 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:41 -0500 Subject: [PATCH 0529/1534] New translations billing.mdx (Korean) --- website/src/pages/ko/subgraphs/billing.mdx | 59 +++++++++++----------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/website/src/pages/ko/subgraphs/billing.mdx b/website/src/pages/ko/subgraphs/billing.mdx index a88a51c3adff..c9f380bb022c 100644 --- a/website/src/pages/ko/subgraphs/billing.mdx +++ b/website/src/pages/ko/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Billing --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. 
Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. 
Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). From 1f21c4f9b1a330d9b7a3347ffacdf2dbeb0e40f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:43 -0500 Subject: [PATCH 0530/1534] New translations billing.mdx (Dutch) --- website/src/pages/nl/subgraphs/billing.mdx | 59 +++++++++++----------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/website/src/pages/nl/subgraphs/billing.mdx b/website/src/pages/nl/subgraphs/billing.mdx index a88a51c3adff..c9f380bb022c 100644 --- a/website/src/pages/nl/subgraphs/billing.mdx +++ b/website/src/pages/nl/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Billing --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. 
- - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). 
+ - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). From 124d7eceba04c3fcdb3046592c438873eb9a708e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:44 -0500 Subject: [PATCH 0531/1534] New translations billing.mdx (Polish) --- website/src/pages/pl/subgraphs/billing.mdx | 61 +++++++++++----------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/website/src/pages/pl/subgraphs/billing.mdx b/website/src/pages/pl/subgraphs/billing.mdx index a88a51c3adff..511ac8067271 100644 --- a/website/src/pages/pl/subgraphs/billing.mdx +++ b/website/src/pages/pl/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Billing --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,7 +38,8 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) -- [TransferTo](https://transferto.xyz/swap) + +- [Przeniesienie do](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. 
Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. 
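Because exchanges only release withdrawals to whitelisted addresses, it is worth checksum-validating an address before adding it to the whitelist. A minimal sketch assuming ethers v6, whose `getAddress` returns the checksummed form and throws on malformed input:

```ts
// Minimal sketch: validate and checksum a destination address before adding it
// to an exchange withdrawal whitelist. Assumes ethers v6; getAddress throws if
// the string is not a valid Ethereum address or has a bad mixed-case checksum.
import { getAddress } from "ethers";

function toChecksummed(input: string): string {
  try {
    return getAddress(input.trim());
  } catch {
    throw new Error(`Not a valid Ethereum address: ${input}`);
  }
}

console.log(toChecksummed("0x9623063377ad1b27544c965ccd7342f7ea7e88c7"));
// -> 0x9623063377AD1B27544C965cCd7342f7EA7e88C7
```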
You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). From fa6ec4bffbc618c4a0744b6a30d09688c3dbdd39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:45 -0500 Subject: [PATCH 0532/1534] New translations billing.mdx (Portuguese) --- website/src/pages/pt/subgraphs/billing.mdx | 117 +++++++++++---------- 1 file changed, 59 insertions(+), 58 deletions(-) diff --git a/website/src/pages/pt/subgraphs/billing.mdx b/website/src/pages/pt/subgraphs/billing.mdx index f67126e2a066..f73ae48ff725 100644 --- a/website/src/pages/pt/subgraphs/billing.mdx +++ b/website/src/pages/pt/subgraphs/billing.mdx @@ -2,20 +2,20 @@ title: Cobranças --- -## Planos de Cobrança para Subgraphs +## Planos de Query Há dois planos disponíveis para queries de subgraphs na Graph Network. -- **Plano Grátis**: O Plano Grátis inclui 100 mil queries mensais gratuitas com acesso total ao ambiente de teste do Subgraph Studio. Este plano é desenhado para amadores, hackathoners e quem tem projetos paralelos para experimentar o The Graph antes de escalar o seu dApp. +- **Plano Grátis**: Inclui 100.000 queries grátis por mês, com acesso ilimitado ao ambiente de testes do Subgraph Studio. O plano é feito para entusiastas, participantes de hackathons, e para quem tem projetos paralelos para experimentar o The Graph antes de escalar o seu dapp. -- **Plano de Crescimento**: O Plano de Crescimento inclui tudo no Plano Grátis; ademais, todas as queries após as 100 mil queries mensais exigirão pagamentos em GRT ou cartão de crédito. O Plano de Crescimento é flexível o suficiente para cobrir equipas com dApps estabelecidos em uma variedade de casos de uso. +- **Plano de Crescimento**: Inclui tudo no Plano Grátis, com todos os queries após a cota de 100.000 mensais exigindo pagamentos com cartão de crédito ou GRT. Este plano é flexível o suficiente para cobrir equipes que estabeleceram dapps numa variedade de casos de uso. ## Pagamentos de Queries com cartão de crédito -- To set up billing with credit/debit cards, users should access Subgraph Studio (https://thegraph.com/studio/) - 1. Entre na [página de Cobranças do Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +- Para configurar opções de pagamento no cartão, os utilizadores devem acessar o Subgraph Studio (https://thegraph.com/studio/) + 1. Vá para a [página de cobrança do Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). 2. Clique no botão "Connect Wallet" (Conectar Carteira) no canto superior direito da página. Isto levará à página de seleção de carteira; lá, selecione a sua carteira e clique em "Connect". 3. Escolha "atualizar plano" se está a atualizar do Plano Grátis, ou escolha "Gerir plano" se já adicionou GRT ao seu saldo de cobrança no passado. Depois, é possível estimar o número de queries para conseguir uma estimativa de preço, mas isto não é obrigatório. 4. Para escolher um pagamento em cartão de crédito, escolha "Cartão de crédito" como o método de pagamento e preencha os campos. Quem já usou o Stripe antes poderá preencher os seus detalhes automaticamente com o recurso Link. @@ -37,20 +37,21 @@ Para pagar por queries, é necessário ter GRT no Arbitrum. Veja algumas maneira - Caso já tenha GRT no Ethereum, é possível enviá-lo ao Arbitrum via bridge. 
Isto é possível via a opção de bridging de GRT providenciada no Subgraph Studio ou uma das seguintes bridges: -- [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) +- [Bridge no Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - Caso tenha outros ativos no Arbitrum, é possível trocá-los por GRT através de um protocolo de troca como o Uniswap. - Também é possível adquirir GRT diretamente no Arbitrum via uma exchange descentralizada. -> Esta seção presume que já tens GRT na sua carteira, e que está na rede Arbitrum. Caso não tenha GRT, aprenda como adquirir o token [aqui](#getting-grt). +> Esta seção presume que já tens GRT na sua carteira, e que estás na rede Arbitrum. Caso não tenha GRT, veja como adquirir o token [aqui](#getting-grt). Quando fizeres bridge do GRT, será possível adicioná-lo ao seu saldo de cobranças. ### Depósitos de GRT com uma carteira -1. Entre na [página de Cobranças do Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +1. Vá para a [página de cobrança do Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). 2. Clique no botão "Connect Wallet" (Conectar Carteira) no canto superior direito da página. Isto levará à página de seleção de carteira; lá, selecione a sua carteira e clique em "Connect". 3. Selecione o botão "Manage" (Gerir) perto do canto superior direito. Utilizadores noviços verão uma opção para "Upgrade to Growth Plan" (Atualizar para o Plano de Crescimento), enquanto os frequentes devem clicar em "Deposit from wallet" (Depositar de carteira). 4. Use o slider para estimar o número de queries que espera fazer mensalmente. @@ -67,18 +68,18 @@ Quando fizeres bridge do GRT, será possível adicioná-lo ao seu saldo de cobra ### Saques de GRT com carteira -1. Entre na [página de Cobranças do Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +1. Vá para a [página de cobrança do Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). 2. Clique no botão "Connect Wallet" (Conectar Carteira) no canto superior direito da página, selecione a sua carteira e clique em "Connect". -3. Click the "Manage" button at the top right corner of the page. Select "Withdraw GRT". A side panel will appear. +3. Clique o botão "Manage" (Preferências) no canto superior direito. Selecione "Withdraw GRT" (Sacar GRT) para abrir um painel lateral. 4. Insira a quantia de GRT que quer sacar. 5. Clique em 'Withdraw GRT' (Sacar GRT) para sacar o GRT do seu saldo. Assine a transação associada na sua carteira — isto custa gas. O GRT será enviado à sua carteira Arbitrum. 6. Quando a transação for confirmada, verá o GRT sacado do seu saldo na sua carteira Arbitrum. ### Depósitos de GRT com uma carteira multisig -1. Entre na [página de Cobranças do Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). -2. Clique no botão "Connect Wallet" no canto superior direito da página, selecione a sua carteira e clique em "Connect". Se usar o [Gnosis-Safe](https://gnosis-safe.io/), poderá conectar a sua multisig além da sua carteira de assinatura. Depois, assine a mensagem associada — isto não custa gas. -3. Selecione o botão "Manage" (Gerir) perto do canto superior direito. Utilizadores noviços verão uma opção para "Upgrade to Growth Plan" (Atualizar para o Plano de Crescimento), enquanto os frequentes devem clicar em "Deposit from wallet" (Depositar de carteira). +1. Vá para a [página de cobrança do Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +2. 
Clique em "Connect Wallet" (Conectar Carteira) no canto superior direito, selecione a sua carteira e clique em "Connect" (Conectar). Se usar o Gnosis-Safe, poderá conectar a sua multisig além da sua carteira de assinatura. Depois, assine a mensagem associada. Isto não tem custo de gas. +3. Selecione o botão "Manage" (Preferências) perto do canto superior direito. Utilizadores noviços verão uma opção para "Upgrade to Growth Plan" (Atualizar para o Plano de Crescimento), enquanto os frequentes devem clicar em "Deposit from wallet" (Depositar de carteira). 4. Use o slider para estimar o número de queries que espera fazer mensalmente. - Para sugestões sobre o número de queries que deve usar, veja a nossa página de **Perguntas Frequentes**. 5. Escolha "Criptomoedas". Atualmente, o GRT é a única criptomoeda aceita na Graph Network. @@ -106,19 +107,19 @@ Este é um guia passo a passo sobre como comprar GRT na Coinbase. 6. Selecione a quantia de GRT que deseja comprar. 7. Verifique a sua compra, e após verificar, clique em "Comprar GRT". 8. Confirme a sua compra, e o GRT será comprado com sucesso. -9. É possível transferir o GRT da sua conta à sua carteira preferida, como o [MetaMask](https://metamask.io/). +9. Poderá transferir o GRT da sua conta a carteiras na sua posse, como no [MetaMask](https://metamask.io/). - Para transferir o GRT à sua carteira, clique no botão "Contas" no canto superior direito da página. - Clique em "Enviar", próximo à conta de GRT. - Insira a quantia de GRT que deseja enviar, e o endereço da carteira que a receberá. - Clique em "Continuar" e confirme a sua transação. Nota: para compras maiores, a Coinbase pode demorar de 7 a 10 dias antes de transferir a quantia completa a uma carteira. -Aprenda mais sobre adquirir GRT na Coinbase [aqui](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +Saiba mais sobre adquirir GRT na Coinbase [aqui](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Este é um guia passo a passo para comprar GRT na Binance. -1. Crie uma conta na [Binance](https://www.binance.com/en). +1. Crie uma conta na [Binance](https://www.binance.com/en/). 2. Quando tiver criado uma conta, precisará verificar a sua identidade através do processo KYC (sigla em inglês para "Conheça o Seu Cliente"). Este é um processo comum em todas as exchanges de cripto, centralizadas ou custodiais. 3. Após verificar a sua identidade, poderá comprar GRT no botão "Comprar Agora", no canto superior direito da página. 4. O botão levará-lhe a uma página onde pode selecionar a moeda que deseja comprar. Selecione GRT. @@ -126,27 +127,27 @@ Este é um guia passo a passo para comprar GRT na Binance. 6. Selecione a quantia de GRT que deseja comprar. 7. Verifique a sua compra e clique em "Comprar GRT". 8. Confirme a sua compra, e logo o seu GRT aparecerá na sua Carteira Spot da Binance. -9. É possível transferir o GRT da sua conta à sua carteira preferida, como o [MetaMask](https://metamask.io/). - - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist. +9. Poderá sacar o GRT da sua conta para carteiras na sua posse, como no [MetaMask](https://metamask.io/). 
+ - Para [sacar o GRT](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) e adicionar à sua carteira, adicione o endereço da sua carteira para a whitelist de saque. - Clique no botão "wallet", clique em "sacar", e selecione GRT. - Insira a quantia de GRT que deseja enviar, e o endereço da carteira na whitelist à qual quer enviar. - Clique em "Continuar" e confirme a sua transação. -Aprenda mais sobre adquirir GRT na Binance [aqui](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +Saiba mais sobre adquirir GRT na Binance [aqui](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap Este é um guia passo a passo sobre como comprar GRT no Uniswap. -1. Entre no [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) e conecte a sua carteira. +1. Conecte a sua carteira à [Uniswap](https://app.uniswap.org/swap?chain=arbitrum). 2. Selecione o token que quer trocar — no caso, ETH. 3. Selecione o token pelo que quer trocar — no caso, GRT. - - Certifique-se que trocará pelo token correto. O endereço de contrato inteligente do GRT no Arbitrum One é: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) + - Verifique se está a trocar para o token correto. O endereço do contrato inteligente na Arbitrum One é: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) 4. Insira a quantia de ETH que deseja trocar. 5. Clique em "Swap". 6. Confirme a transação na sua carteira e espere que a transação seja processada. -Aprenda mais sobre como adquirir GRT no Uniswap [aqui](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +Saiba mais sobre adquirir GRT na Uniswap [aqui](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). ## Como adquirir Ether @@ -156,58 +157,58 @@ Esta seção explicará como adquirir Ether (ETH) para pagar por taxas de transa Este é um guia passo a passo sobre como comprar ETH na Coinbase. -1. Crie uma conta na [Coinbase](https://www.coinbase.com/). -2. Quando tiver criado uma conta, verifique a sua identidade através do processo KYC (sigla em inglês para "Conheça o Seu Cliente"). Este processo é comum em todas as corretoras de cripto, centralizadas ou custodiais. -3. Após verificar a sua identidade, compre ETH no botão "Comprar/Vender", no canto superior direito da página. -4. Selecione a moeda que deseja comprar — no caso, ETH. -5. Selecione o seu método de pagamento preferido. -6. Insira a quantia de ETH que deseja comprar. -7. Reveja a sua compra e clique em "Comprar ETH". -8. Confirme a sua compra, e o ETH será adquirido com sucesso. -9. Pode transferir o ETH da sua conta à sua carteira de cripto preferida, como o [MetaMask](https://metamask.io/). - - Para transferir o ETH à sua carteira, clique no botão "Contas" no canto superior direito da página. - - Clique em "Enviar", próximo à conta de ETH. - - Insira a quantia de ETH que deseja enviar, e o endereço da carteira que a receberá. - - Certifique-se que enviará ao endereço da sua carteira de Ethereum no Arbitrum One. - - Clique em "Continuar" e confirme a sua transação. - -Saiba mais sobre como adquirir ETH na Coinbase [aqui](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +1. 
Crie uma conta na [Coinbase](https://www.coinbase.com/). +2. Quando tiver criado uma conta, verifique a sua identidade através do processo KYC (sigla em inglês para "Conheça o Seu Cliente"). Este processo é comum em todas as corretoras de cripto, centralizadas ou custodiais. +3. Após verificar a sua identidade, compre ETH no botão "Comprar/Vender", no canto superior direito da página. +4. Selecione a moeda que deseja comprar — no caso, ETH. +5. Selecione o seu método de pagamento preferido. +6. Insira a quantia de ETH que deseja comprar. +7. Reveja a sua compra e clique em "Comprar ETH". +8. Confirme a sua compra, e o ETH será adquirido com sucesso. +9. Poderá transferir o ETH da sua conta na Coinbase a carteiras na sua posse, como no [MetaMask](https://metamask.io/). + - Para transferir o ETH à sua carteira, clique no botão "Contas" no canto superior direito da página. + - Clique em "Enviar", próximo à conta de ETH. + - Insira a quantia de ETH que deseja enviar, e o endereço da carteira que a receberá. + - Certifique-se que enviará ao endereço da sua carteira de Ethereum no Arbitrum One. + - Clique em "Continuar" e confirme a sua transação. + +Saiba mais sobre adquirir ETH na Coinbase [aqui](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Este é um guia passo a passo sobre como comprar ETH na Binance. -1. Crie uma conta na [Binance](https://www.binance.com/en). -2. Quando tiver criado uma conta, verifique a sua identidade através do processo KYC (sigla em inglês para "Conheça o Seu Cliente"). Este processo é comum em todas as corretoras de cripto, centralizadas ou custodiais. -3. Após verificar a sua identidade, poderá comprar ETH no botão "Comprar/Vender", no banner da página principal. -4. Selecione a moeda que deseja comprar — no caso, ETH. -5. Selecione o seu método de pagamento preferido. -6. Insira a quantia de ETH que deseja comprar. -7. Reveja a sua compra e clique em "Comprar ETH". -8. Confirme a sua compra, e o seu ETH aparecerá na sua Carteira Spot da Binance. -9. Pode transferir o ETH da sua conta à sua carteira preferida, como o [MetaMask](https://metamask.io/). - - Para sacar o ETH à sua carteira, adicione o endereço da sua carteira à whitelist de saques. - - Clique no botão "wallet", clique em "sacar" (withdraw), e selecione ETH. - - Insira a quantia de ETH que deseja enviar, e o endereço da carteira na whitelist à qual quer enviar. - - Certifique-se que enviará ao endereço da sua carteira de Ethereum no Arbitrum One. - - Clique em "Continuar" e confirme a sua transação. - -Saiba mais sobre como adquirir ETH na Binance [aqui](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +1. Crie uma conta na [Binance](https://www.binance.com/en/). +2. Quando tiver criado uma conta, verifique a sua identidade através do processo KYC (sigla em inglês para "Conheça o Seu Cliente"). Este processo é comum em todas as corretoras de cripto, centralizadas ou custodiais. +3. Após verificar a sua identidade, poderá comprar ETH no botão "Comprar/Vender", no banner da página principal. +4. Selecione a moeda que deseja comprar — no caso, ETH. +5. Selecione o seu método de pagamento preferido. +6. Insira a quantia de ETH que deseja comprar. +7. Reveja a sua compra e clique em "Comprar ETH". +8. Confirme a sua compra, e o seu ETH aparecerá na sua Carteira Spot da Binance. +9. 
Poderá sacar o ETH da sua conta para carteiras na sua posse, como no [MetaMask](https://metamask.io/). + - Para sacar o ETH à sua carteira, adicione o endereço da sua carteira à whitelist de saques. + - Clique no botão "wallet", clique em "sacar" (withdraw), e selecione ETH. + - Insira a quantia de ETH que deseja enviar, e o endereço da carteira na whitelist à qual quer enviar. + - Certifique-se que enviará ao endereço da sua carteira de Ethereum no Arbitrum One. + - Clique em "Continuar" e confirme a sua transação. + +Saiba mais sobre adquirir ETH na Binance [aqui](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Perguntas Frequentes Sobre Cobranças ### De quantas queries precisarei? -You don't need to know how many queries you'll need in advance. You will only be charged for what you use and you can withdraw GRT from your account at any time. +Não é necessário saber de quantos queries precisará com antecedência. O utilizador só será cobrado pelo que pode usar, e poderá sacar GRT da própria conta a qualquer hora. Recomendamos estimar mais queries do que necessário para que não precise encher o seu saldo com frequência. Uma boa estimativa para aplicativos pequenos ou médios é começar com 1 a 2 milhões de queries por mês e monitorar atenciosamente o uso nas primeiras semanas. Para aplicativos maiores, uma boa estimativa consiste em utilizar o número de visitas diárias ao seu site multiplicado ao número de queries que a sua página mais ativa faz ao abrir. -Claro que todos os utilizadores, novatos ou experientes, podem contactar a equipa de BD da Edge & Node para aprender mais sobre uso antecipado. +Utilizadores noviços e preexistentes podem consultar a equipa de Desenvolvimento Comercial (BD) da Edge & Node para saber mais sobre uso antecipado. ### Posso sacar GRT do meu saldo de cobrança? -Sim, sempre é possível sacar GRT que não já foi usado para queries do seu saldo de cobrança. O contrato inteligente só é projetado para bridgear GRT da mainnet Ethereum até a rede Arbitrum. Se quiser transferir o seu GRT do Arbitrum de volta à mainnet Ethereum, precisará usar a [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +Sim, sempre é possível sacar GRT do seu saldo de cobrança que já não tenha sido usado para queries. O contrato inteligente é projetado apenas para fazer bridge de GRT da mainnet Ethereum até a rede Arbitrum. Se quiser transferir o seu GRT do Arbitrum de volta à mainnet Ethereum, use a [bridge do Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161). -### What happens when my billing balance runs out? Will I get a warning? +### O que acontece se o meu saldo de cobrança esgotar? Receberei uma notificação? Serão enviadas várias notificações de email antes do seu saldo de cobrança ser esvaziado. 
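The deposit and withdrawal guides in the patch above all end on the same two checks: that the wallet is connected to Arbitrum One, and that it actually holds GRT. Below is a minimal TypeScript sketch of those checks. The use of ethers v6, the public RPC endpoint, and the placeholder wallet address are assumptions; the GRT contract address and the Arbitrum One chain ID (42161) come from the Uniswap and bridge links quoted in the guides themselves.

```ts
import { Contract, JsonRpcProvider, formatUnits } from "ethers";

// GRT token contract on Arbitrum One, as quoted in the Uniswap guide above.
const GRT = "0x9623063377AD1B27544C965cCd7342f7EA7e88C7";
// Minimal ERC-20 fragment; balanceOf/decimals are standard ERC-20 view functions.
const ERC20_ABI = [
  "function balanceOf(address owner) view returns (uint256)",
  "function decimals() view returns (uint8)",
];

async function checkGrtOnArbitrumOne(wallet: string, rpcUrl: string): Promise<void> {
  const provider = new JsonRpcProvider(rpcUrl);
  // Arbitrum One's chain ID is 42161 (the `l2ChainId` in the bridge links above).
  const { chainId } = await provider.getNetwork();
  if (chainId !== 42161n) {
    throw new Error(`Connected to chain ${chainId}, expected Arbitrum One (42161)`);
  }
  const grt = new Contract(GRT, ERC20_ABI, provider);
  const [raw, decimals] = await Promise.all([grt.balanceOf(wallet), grt.decimals()]);
  console.log(`GRT balance on Arbitrum One: ${formatUnits(raw, decimals)}`);
}

// "0xYourWalletAddress" is a placeholder; the RPC URL is Arbitrum's public endpoint.
checkGrtOnArbitrumOne("0xYourWalletAddress", "https://arb1.arbitrum.io/rpc").catch(console.error);
```

The same pattern covers the ETH guides: `provider.getBalance(wallet)` returns the native ETH balance directly, so no token contract is needed there.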
From 7d03509ea04372fd5f6a8c7207adee9e38b1e34a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:46 -0500 Subject: [PATCH 0533/1534] New translations billing.mdx (Russian) --- website/src/pages/ru/subgraphs/billing.mdx | 105 +++++++++++---------- 1 file changed, 53 insertions(+), 52 deletions(-) diff --git a/website/src/pages/ru/subgraphs/billing.mdx b/website/src/pages/ru/subgraphs/billing.mdx index f75dc91b4631..0a7daa3442d0 100644 --- a/website/src/pages/ru/subgraphs/billing.mdx +++ b/website/src/pages/ru/subgraphs/billing.mdx @@ -2,20 +2,20 @@ title: Выставление счетов --- -## Тарифные планы для субграфов +## Querying Plans Существует два плана для выполнения запросов к субграфам в The Graph Network. -- **Бесплатный план**: Бесплатный план включает 100,000 бесплатных запросов в месяц с полным доступом к тестовой среде Subgraph Studio. Этот план разработан для любителей, участников хакатонов и тех, у кого есть сторонние проекты, чтобы они могли попробовать The Graph перед масштабированием своего децентрализованного приложения. +- **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. -- **План роста**: План роста включает все возможности бесплатного плана, но все запросы, превышающие 100,000 в месяц, требуют оплаты в GRT или кредитной картой. Этот план достаточно гибок, чтобы поддерживать команды, которые уже запустили децентрализованные приложения для различных сценариев использования. +- **Growth Plan**: The Growth Plan includes everything in the Free Plan with all queries after 100,000 monthly queries requiring payments with GRT or credit card. The Growth Plan is flexible enough to cover teams that have established dapps across a variety of use cases. ## Оплата запросов с помощью кредитной карты - Чтобы настроить оплату с помощью кредитных/дебетовых карт, пользователи должны зайти в Subgraph Studio (https://thegraph.com/studio/) - 1. Перейдите на[страницу оплаты Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Нажмите на кнопку «Connect Wallet» в правом верхнем углу страницы. Вы будете перенаправлены на страницу выбора кошелька. Выберите свой кошелек и нажмите «Connect». 3. Выберите «Обновление плана», если Вы переходите с бесплатного плана, или «Управление планом», если Вы уже ранее добавили GRT на свой баланс для оплаты. Далее Вы можете оценить количество запросов, чтобы получить примерную стоимость, но это не обязательный шаг. 4. Чтобы выбрать оплату кредитной картой, выберите «Credit card» как способ оплаты и заполните информацию о своей карте. Те, кто ранее использовал Stripe, могут воспользоваться функцией Link для автоматического заполнения данных. @@ -37,24 +37,25 @@ title: Выставление счетов - Если у Вас уже есть GRT на Ethereum, Вы можете перенести его на Arbitrum. Вы можете сделать это через опцию переноса GRT, доступную в Subgraph Studio, или с помощью одного из следующих мостов: -- [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) +- [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - Если у Вас уже есть активы на Arbitrum, Вы можете обменять их на GRT через протокол обмена, такой как Uniswap. 
- В качестве альтернативы, Вы можете приобрести GRT напрямую на Arbitrum через децентрализованную биржу. -> Этот раздел предполагает, что у Вас уже есть GRT в Вашем кошельке, и Вы находитесь в сети Arbitrum. Если у Вас нет GRT, Вы можете узнать, как его получить, [здесь](#getting-grt). +> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). После переноса GRT Вы можете добавить его на баланс для оплаты. ### Добавление токенов GRT с помощью кошелька -1. Перейдите на [страницу оплаты в Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Нажмите на кнопку «Connect Wallet» в правом верхнем углу страницы. Вы будете перенаправлены на страницу выбора кошелька. Выберите свой кошелек и нажмите «Connect». 3. Нажмите кнопку «Управление» в правом верхнем углу. Новые пользователи увидят опцию «Обновить до плана Роста», а те, кто пользовался ранее — «Пополнение с кошелька». 4. Используйте ползунок, чтобы оценить количество запросов, которое Вы планируете выполнять ежемесячно. - - Рекомендации по количеству запросов, которые Вы можете использовать, можно найти на нашей странице **Часто задаваемые вопросы**. + - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. 5. Выберите «Криптовалюта». В настоящее время GRT — единственная криптовалюта, принимаемая в The Graph Network. 6. Выберите количество месяцев, за которые Вы хотели бы внести предоплату. - Предоплата не обязывает Вас к дальнейшему использованию. С Вас будет взиматься плата только за то, что Вы используете, и Вы сможете вывести свой баланс в любое время. @@ -67,7 +68,7 @@ title: Выставление счетов ### Вывод токенов GRT с помощью кошелька -1. Перейдите на [страницу для оплаты Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Нажмите на кнопку «Подключить кошелек» в правом верхнем углу страницы. Выберите свой кошелек и нажмите «Подключить». 3. Нажмите кнопку «Управление» в правом верхнем углу страницы. Выберите «Вывести GRT». Появится боковая панель. 4. Введите сумму GRT, которую хотите вывести. @@ -76,11 +77,11 @@ title: Выставление счетов ### Добавление токенов GRT с помощью кошелька с мультиподписью -1. Перейдите на [страницу оплаты Subgraph Studio](https://thegraph.com/studio/subgraphs/billing/). -2. Кликните «Подключить кошелек» в правом верхнем углу страницы. Выберите свой кошелек и нажмите «Подключить». Если Вы используете [Gnosis-Safe](https://gnosis-safe.io/), Вы сможете подключить как стандартный кошелёк, так и кошелёк с мультиподписью. Затем подпишите соответствующую транзакцию. За это Вам не придётся платить комиссию. +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). +2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. 3. Нажмите кнопку «Управление» в правом верхнем углу. Новые пользователи увидят опцию «Обновить до плана Роста», а те, кто пользовался ранее — «Пополнение с кошелька». 4. 
Используйте ползунок, чтобы оценить количество запросов, которое Вы планируете выполнять ежемесячно. - - Рекомендации по количеству запросов, которые Вы можете использовать, можно найти на нашей странице **Часто задаваемые вопросы**. + - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. 5. Выберите «Криптовалюта». В настоящее время GRT — единственная криптовалюта, принимаемая в The Graph Network. 6. Выберите количество месяцев, за которые Вы хотели бы внести предоплату. - Предоплата не обязывает Вас к дальнейшему использованию. С Вас будет взиматься плата только за то, что Вы используете, и Вы сможете вывести свой баланс в любое время. @@ -98,7 +99,7 @@ title: Выставление счетов Далее будет представлено пошаговое руководство по приобретению токена GRT на Coinbase. -1. Перейдите на [Coinbase](https://www.coinbase.com/) и создайте учетную запись. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. 2. После того как Вы создали учетную запись, Вам нужно будет подтвердить свою личность с помощью процесса, известного как KYC (или Know Your Customer). Это стандартная процедура для всех централизованных или кастодиальных криптобирж. 3. После того как Вы подтвердили свою личность, Вы можете приобрести токены ETH нажав на кнопку "Купить/Продать" в правом верхнем углу страницы. 4. Выберите валюту, которую хотите купить. Выберите GRT. @@ -106,19 +107,19 @@ title: Выставление счетов 6. Выберите количество токенов GRT, которое хотите приобрести. 7. Проверьте все данные о приобретении. Проверьте все данные о приобретении и нажмите «Купить GRT». 8. Подтвердите покупку. Подтвердите покупку - Вы успешно приобрели токены GRT. -9. Вы можете перевести токены GRT со своей учетной записи на свой кошелек, например, на [MetaMask](https://metamask.io/). +9. You can transfer the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). - Чтобы перевести токены GRT на свой кошелек, нажмите кнопку «Учетные записи» в правом верхнем углу страницы. - Нажмите на кнопку «Отправить» рядом с учетной записью GRT. - Введите сумму GRT, которую хотите отправить, и адрес кошелька, на который хотите её отправить. - Нажмите «Продолжить» и подтвердите транзакцию. -Обратите внимание, что при больших суммах покупки Coinbase может потребовать от Вас подождать 7-10 дней, прежде чем переведет полную сумму на кошелек. -Получить больше информации о покупке GRT на Coinbase Вы можете [здесь](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Далее будет представлено пошаговое руководство по приобретению токена GRT на Binance. -1. Перейдите на [Binance](https://www.binance.com/en) и создайте учетную запись. +1. Go to [Binance](https://www.binance.com/en) and create an account. 2. После того как Вы создали учетную запись, Вам нужно будет подтвердить свою личность с помощью процесса, известного как KYC (или Know Your Customer). Это стандартная процедура для всех централизованных или кастодиальных криптобирж. 3. После того как Вы подтвердили свою личность, Вы можете приобрести токены GRT. Вы можете сделать это, нажав на кнопку «Купить сейчас» на баннере главной страницы. 4. Вы попадете на страницу, где сможете выбрать валюту, которую хотите приобрести. Выберите GRT. 
@@ -126,27 +127,27 @@ title: Выставление счетов 6. Выберите количество токенов GRT, которое хотите приобрести. 7. Проверьте все данные о приобретении и нажмите «Купить GRT». 8. Подтвердите покупку, и Вы сможете увидеть GRT в своем кошельке Binance Spot. -9. Вы можете вывести GRT со своей учетной записи на свой окошелек, например, на [MetaMask](https://metamask.io/). - - [Чтобы вывести](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) GRT на свой кошелек, добавьте адрес кошелька в белый список вывода. +9. You can withdraw the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). + - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist. - Нажмите на кнопку «кошелек», нажмите «вывести» и выберите GRT. - Введите сумму GRT, которую хотите отправить, и адрес кошелька из белого списка, на который Вы хотите её отправить. - Нажмите «Продолжить» и подтвердите транзакцию. -Получить больше информации о покупке GRT на Binance Вы можете [здесь](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap Так Вы можете приобрести GRT на Uniswap. -1. Перейдите на [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) и подключите свой кошелек. +1. Go to [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) and connect your wallet. 2. Выберите токен, который хотите обменять. Выберите ETH. 3. Выберите токен, на который хотите произвести обмен. Выберите GRT. - - Убедитесь, что выбираете правильный токен. Адрес смарт-контракта GRT в сети Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) + - Make sure you're swapping for the correct token. The GRT smart contract address on Arbitrum One is: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) 4. Введите количество ETH, которое хотите обменять. 5. Нажмите «Обменять». 6. Подтвердите транзакцию в своем кошельке и дождитесь ее обработки. -Получить больше информации о покупке GRT на Uniswap Вы можете [здесь](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). ## Получение Ether @@ -156,20 +157,20 @@ title: Выставление счетов Далее будет представлено пошаговое руководство по приобретению токена ETH на Coinbase. -1. Перейдите на [Coinbase](https://www.coinbase.com/) и создайте учетную запись. -2. После того как Вы создали учетную запись, Вам нужно будет подтвердить свою личность с помощью процесса, известного как KYC (или Know Your Customer). Это стандартная процедура для всех централизованных или кастодиальных криптобирж. -3. После того как Вы подтвердили свою личность, Вы можете приобрести токены ETH нажав на кнопку "Купить/Продать" в правом верхнем углу страницы. -4. Выберите валюту, которую хотите купить. Выберите ETH. -5. Выберите предпочитаемый способ оплаты. -6. Введите количество ETH, которое хотите приобрести. -7. Проверьте все данные о приобретении и нажмите «Купить ETH». 
-8. Подтвердите покупку. Вы успешно приобрели токены ETH. -9. Вы можете перевести токены ETH со своей учетной записи на свой кошелек, например, на [MetaMask](https://metamask.io/). - - Чтобы перевести ETH на свой кошелек, нажмите кнопку «Учетные записи» в правом верхнем углу страницы. - - Нажмите на кнопку «Отправить» рядом с учетной записью ETH. - - Введите сумму ETH которую хотите отправить, и адрес кошелька, на который хотите её отправить. - - Убедитесь, что делаете перевод на адрес своего Ethereum-кошелька в сети Arbitrum One. - - Нажмите «Продолжить» и подтвердите транзакцию. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. После того как Вы создали учетную запись, Вам нужно будет подтвердить свою личность с помощью процесса, известного как KYC (или Know Your Customer). Это стандартная процедура для всех централизованных или кастодиальных криптобирж. +3. После того как Вы подтвердили свою личность, Вы можете приобрести токены ETH нажав на кнопку "Купить/Продать" в правом верхнем углу страницы. +4. Выберите валюту, которую хотите купить. Выберите ETH. +5. Выберите предпочитаемый способ оплаты. +6. Введите количество ETH, которое хотите приобрести. +7. Проверьте все данные о приобретении и нажмите «Купить ETH». +8. Подтвердите покупку. Вы успешно приобрели токены ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - Чтобы перевести ETH на свой кошелек, нажмите кнопку «Учетные записи» в правом верхнем углу страницы. + - Нажмите на кнопку «Отправить» рядом с учетной записью ETH. + - Введите сумму ETH которую хотите отправить, и адрес кошелька, на который хотите её отправить. + - Убедитесь, что делаете перевод на адрес своего Ethereum-кошелька в сети Arbitrum One. + - Нажмите «Продолжить» и подтвердите транзакцию. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Перейдите на [Binance](https://www.binance.com/en) и создайте учетную запись. -2. После того как Вы создали учетную запись, Вам нужно будет подтвердить свою личность с помощью процесса, известного как KYC (или Know Your Customer). Это стандартная процедура для всех централизованных или кастодиальных криптобирж. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Выберите валюту, которую хотите купить. Выберите ETH. -5. Выберите предпочитаемый способ оплаты. -6. Введите количество ETH, которое хотите приобрести. -7. Проверьте все данные о приобретении и нажмите «Купить ETH». -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. Вы можете вывести ETH со своей учетной записи на свой кошелек, например, на [MetaMask](https://metamask.io/). - - Чтобы вывести ETH на свой кошелек, добавьте адрес кошелька в белый список вывода. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Убедитесь, что делаете перевод на адрес своего Ethereum-кошелька в сети Arbitrum One. - - Нажмите «Продолжить» и подтвердите транзакцию. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. 
После того как Вы создали учетную запись, Вам нужно будет подтвердить свою личность с помощью процесса, известного как KYC (или Know Your Customer). Это стандартная процедура для всех централизованных или кастодиальных криптобирж. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Выберите валюту, которую хотите купить. Выберите ETH. +5. Выберите предпочитаемый способ оплаты. +6. Введите количество ETH, которое хотите приобрести. +7. Проверьте все данные о приобретении и нажмите «Купить ETH». +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - Чтобы вывести ETH на свой кошелек, добавьте адрес кошелька в белый список вывода. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Убедитесь, что делаете перевод на адрес своего Ethereum-кошелька в сети Arbitrum One. + - Нажмите «Продолжить» и подтвердите транзакцию. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). @@ -202,11 +203,11 @@ You can learn more about getting ETH on Binance [here](https://www.binance.com/e Мы рекомендуем переоценить количество запросов, чтобы Вам не приходилось часто пополнять баланс. Хорошей оценкой для небольших и средних приложений будет начать с 1–2 млн запросов в месяц и внимательно следить за использованием в первые недели. Для более крупных приложений хорошей оценкой будет использовать количество ежедневных посещений Вашего сайта, умноженное на количество запросов, которые делает Ваша самая активная страница при открытии. -Разумеется, как новые, так и старые пользователи могут обратиться к команде BD Edge & Node за консультацией, чтобы узнать больше о предполагаемом использовании. +Of course, both new and existing users can reach out to Edge & Node's BD team for a consult to learn more about anticipated usage. ### Могу ли я вывести GRT со своего платежного баланса? -Да, Вы всегда можете вывести GRT, которые еще не были использованы для запросов, с Вашего баланса. Контракт на выставление счетов предназначен только для моста GRT из основной сети Ethereum в сеть Arbitrum. Если Вы хотите перенести свой GRT из Arbitrum обратно в сеть Ethereum, Вам необходимо использовать [Мост Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161). +Yes, you can always withdraw GRT that has not already been used for queries from your billing balance. The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). ### Что произойдет, когда мой платежный баланс закончится? Получу ли я предупреждение? 
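The sizing heuristic in the FAQ above (daily site visits multiplied by the queries your most active page fires on load) is easy to sanity-check in code. A tiny TypeScript sketch follows; the visit and per-page numbers are made-up inputs, not recommendations.

```ts
// Monthly-query estimate per the FAQ heuristic above:
// daily visits x queries fired by the most active page on load, over ~30 days.
function estimateMonthlyQueries(
  dailyVisits: number,
  queriesPerPageLoad: number,
  daysPerMonth = 30,
): number {
  return dailyVisits * queriesPerPageLoad * daysPerMonth;
}

// Hypothetical app: 5,000 daily visits, 8 queries per load of the busiest page.
console.log(estimateMonthlyQueries(5_000, 8)); // 1200000, inside the 1M-2M starter range
```

Since you are only charged for what you use and unused GRT can be withdrawn at any time, overestimating here simply means topping up the billing balance less often.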
From 348fc324c52421988811689ec91df0353fd6c948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:47 -0500 Subject: [PATCH 0534/1534] New translations billing.mdx (Swedish) --- website/src/pages/sv/subgraphs/billing.mdx | 67 +++++++++++----------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/website/src/pages/sv/subgraphs/billing.mdx b/website/src/pages/sv/subgraphs/billing.mdx index 6d747371b5b2..d864c1d3d6fb 100644 --- a/website/src/pages/sv/subgraphs/billing.mdx +++ b/website/src/pages/sv/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Fakturering --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,43 +157,43 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Klicka på "Fortsätt" och bekräfta din transaktion. - -Du kan lära dig mer om att få ETH på Coinbase [här](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). 
+ - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance Detta kommer att vara en stegvis guide för att köpa ETH på Binance. -1. Gå till [Binance](https://www.binance.com/en) och skapa ett konto. -2. När du har skapat ett konto, verifiera din identitet genom en process som kallas KYC (Känn Din Kund). Detta är en standardprocedur för alla centraliserade eller förvarande kryptobörser. -3. När du har verifierat din identitet kan du köpa ETH genom att klicka på knappen "Köp nu" på startsidan. -4. Välj den valuta du vill köpa. Välj ETH. -5. Välj din föredragna betalningsmetod. -6. Ange det belopp av ETH du vill köpa. -7. Granska ditt köp och klicka på "Köp ETH". -8. Bekräfta ditt köp och du kommer att se din ETH i din Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Klicka på knappen "plånbok", klicka på "Ta ut" och välj ETH. - - Ange det belopp av ETH du vill skicka och den vitlistade plånboksadressen du vill skicka det till. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Klicka på "Fortsätt" och bekräfta din transaktion. - -Du kan lära dig mer om att få ETH på Binance [här](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. När du har verifierat din identitet kan du köpa ETH genom att klicka på knappen "Köp nu" på startsidan. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Bekräfta ditt köp och du kommer att se din ETH i din Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Klicka på knappen "plånbok", klicka på "Ta ut" och välj ETH. + - Ange det belopp av ETH du vill skicka och den vitlistade plånboksadressen du vill skicka det till. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). 
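Before the billing FAQs that follow, it can help to see what a metered query looks like in practice. The sketch below assumes the gateway URL shape (`https://gateway.thegraph.com/api/<api-key>/subgraphs/id/<subgraph-id>`) and uses placeholder credentials; `_meta` is a field subgraphs generally expose, and `fetch` is the global available in modern Node and browsers.

```ts
// A minimal paid query against the gateway; each request like this one is
// metered against the billing balance described in the FAQs below.
async function querySubgraph(apiKey: string, subgraphId: string): Promise<unknown> {
  // Assumed gateway URL shape; the API key and subgraph ID are placeholders.
  const url = `https://gateway.thegraph.com/api/${apiKey}/subgraphs/id/${subgraphId}`;
  const res = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // `_meta.block.number` reports the latest block the subgraph has indexed.
    body: JSON.stringify({ query: "{ _meta { block { number } } }" }),
  });
  if (!res.ok) throw new Error(`Gateway responded with HTTP ${res.status}`);
  return res.json();
}

querySubgraph("YOUR_API_KEY", "YOUR_SUBGRAPH_ID").then(console.log).catch(console.error);
```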
## Billing FAQs From 2b0521396264699f4a457c20cc71250ded9b2f3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:48 -0500 Subject: [PATCH 0535/1534] New translations billing.mdx (Turkish) --- website/src/pages/tr/subgraphs/billing.mdx | 111 +++++++++++---------- 1 file changed, 56 insertions(+), 55 deletions(-) diff --git a/website/src/pages/tr/subgraphs/billing.mdx b/website/src/pages/tr/subgraphs/billing.mdx index 7de170b3c7df..a86c1adbb755 100644 --- a/website/src/pages/tr/subgraphs/billing.mdx +++ b/website/src/pages/tr/subgraphs/billing.mdx @@ -2,20 +2,20 @@ title: Faturalandırma --- -## Subgraph Faturalama Planları +## Querying Plans The Graph Ağı'nda subgraph'leri sorgulamak için kullanabileceğiniz iki plan bulunmaktadır. -- **Ücretsiz Plan**: Ücretsiz Plan, Subgraph Studio test ortamına tam erişim ile birlikte aylık 100.000 ücretsiz sorgu içerir. Bu plan, dapp'lerini ölçeklendirmeden önce The Graph'i denemek isteyen hobi meraklıları, hackathon katılımcıları ve yan projeleri olan kişiler için tasarlanmıştır. +- **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. -- **Büyüme Planı**: Büyüme Planı, Ücretsiz Plan'daki her özelliği içerir ve aylık 100.000 sorgudan sonraki tüm sorgular için GRT veya kredi kartı ile ödeme gerektirir. Büyüme Planı, çeşitli kullanım senaryolarına sahip dapp'leri ölçeklendirmek isteyen ekipler için yeterli esnekliği sunar. +- **Growth Plan**: The Growth Plan includes everything in the Free Plan with all queries after 100,000 monthly queries requiring payments with GRT or credit card. The Growth Plan is flexible enough to cover teams that have established dapps across a variety of use cases. ## Kredi kartı ile sorgu ödemeleri - Kredi/banka kartları ile faturalandırmayı ayarlamak için, kullanıcıların Subgraph Studio'ya (https://thegraph.com/studio/) erişmeleri gerekir - 1. Subgraph Studio Faturalandırma sayfasına gitmek için  [buraya tıklayın](https://thegraph.com/studio/subgraphs/billing/). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Sayfanın sağ üst köşesindeki "Cüzdanı Bağla" düğmesine tıklayın. Cüzdan seçim sayfasına yönlendirileceksiniz. Cüzdanınızı seçin ve "Bağlan" a tıklayın. 3. Ücretsiz Plan'dan yükseltme yapıyorsanız "Planı Yükselt" seçeneğini, daha önce faturalandırma bakiyenize GRT eklediyseniz "Planı Yönet" seçeneğini seçin. Sonrasında, sorgu sayısını tahmin ederek bir fiyat tahmini alabilirsiniz, ancak bu zorunlu bir adım değildir. 4. Kredi kartı ödemesini seçmek için, ödeme yöntemi olarak "Kredi kartı" seçeneğini seçin ve kredi kartı bilgilerinizi doldurun. Daha önce Stripe kullananlar, bilgilerini otomatik doldurmak için Link özelliğini kullanabilirler. @@ -38,23 +38,24 @@ Sorgu ödemelerini yapmak için Arbitrum üzerinde GRT'ye ihtiyacınız var. İ - Ethereum üzerinde zaten GRT'niz varsa, bunu Arbitrum'a köprüleyebilirsiniz. Bunu, Subgraph Studio'da sunulan GRT köprüleme seçeneği aracılığıyla veya aşağıdaki köprülerden birini kullanarak yapabilirsiniz: - [Arbitrum Köprüsü](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - Arbitrum üzerinde zaten varlıklarınız varsa, Uniswap gibi bir takas protokolü aracılığıyla bunları GRT ile takas edebilirsiniz. 
- Alternatif olarak, GRT'yi doğrudan Arbitrum üzerinde merkeziyetsiz bir borsa aracılığıyla edinebilirsiniz. -> Bu bölüm, cüzdanınızda zaten GRT bulunduğu ve Arbitrum üzerinde olduğunuz varsayılarak yazılmıştır. Eğer GRT'niz yoksa, nasıl GRT edinebileceğinizi [buradan](#getting-grt) öğrenebilirsiniz. +> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). GRT'yi köprüledikten sonra faturalandırma bakiyenize ekleyebilirsiniz. ### Bir cüzdan kullanarak GRT ekleme -1. Subgraph Studio Faturalandırma sayfasına gitmek için  [buraya tıklayın](https://thegraph.com/studio/subgraphs/billing/). +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Sayfanın sağ üst köşesindeki "Cüzdanı Bağla" düğmesine tıklayın. Cüzdan seçim sayfasına yönlendirileceksiniz. Cüzdanınızı seçin ve "Bağlan" a tıklayın. 3. Sağ üst köşedeki 'Yönet' düğmesine tıklayın. İlk kez kullanıyorsanız 'Büyüme Planına Yükselt' seçeneğini göreceksiniz. Daha önce işlem yaptıysanız 'Cüzdandan Yatır' seçeneğine tıklayın. 4. Kaydırıcıyı kullanarak aylık olarak yapmayı beklediğiniz sorgu sayısını tahmin edin. - - Aylık sorgu sayısı hakkında öneriler almak için **Sıkça Sorulan Sorular** sayfamıza göz atın. + - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. 5. "Kriptopara" seçeneğini seçin. Şu anda The Graph Ağı'nda kabul edilen tek kriptopara GRT'dir. 6. Peşin ödeme yapmak istediğiniz ay sayısını seçin. - Peşin ödeme yapmak, gelecekte kullanım zorunluluğu getirmez. Yalnızca kullandığınız kadar ücretlendirilirsiniz ve bakiyenizi istediğiniz zaman çekebilirsiniz. @@ -67,7 +68,7 @@ GRT'yi köprüledikten sonra faturalandırma bakiyenize ekleyebilirsiniz. ### Bir cüzdan kullanarak GRT çekme -1. Subgraph Studio Faturalandırma sayfasına gitmek için  [buraya tıklayın](https://thegraph.com/studio/subgraphs/billing/). +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). 2. Sayfanın sağ üst köşesindeki "Cüzdanı Bağla" düğmesine tıklayın. Cüzdanınızı seçin ve "Bağlan" düğmesine tıklayın. 3. Sayfanın sağ üst köşesindeki "Yönet" düğmesine tıklayın. "GRT Çek" seçeneğini seçin. Bir yan panel açılacaktır. 4. Çekmek istediğiniz GRT miktarını girin. @@ -76,11 +77,11 @@ GRT'yi köprüledikten sonra faturalandırma bakiyenize ekleyebilirsiniz. ### Multisig cüzdanı kullanarak GRT ekleme -1. Subgraph Studio Faturalandırma sayfasına gitmek için [buraya tıklayın](https://thegraph.com/studio/subgraphs/billing/). -2. Sayfanın sağ üst köşesindeki "Cüzdanı Bağla" düğmesine tıklayın. Cüzdanınızı seçin ve "Bağlan" düğmesine tıklayın. Eğer [Gnosis-Safe](https://gnosis-safe.io/) kullanıyorsanız, multisig cüzdanınızı ve imza cüzdanınızı da bağlayabileceksiniz. Ardından ilgili mesajı imzalayın. Bu işlem gas ücreti gerektirmeyecektir. +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/subgraphs/billing/). +2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. 3. Sağ üst köşedeki 'Yönet' düğmesine tıklayın. İlk kez kullanıyorsanız 'Büyüme Planına Yükselt' seçeneğini göreceksiniz. Daha önce işlem yaptıysanız 'Cüzdandan Yatır' seçeneğine tıklayın. 4. 
Kaydırıcıyı kullanarak aylık olarak yapmayı beklediğiniz sorgu sayısını tahmin edin.
-   - Aylık sorgu sayısı hakkında öneriler almak için **Sıkça Sorulan Sorular** sayfamıza göz atın.
+   - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page.
5. "Kriptopara" seçeneğini seçin. Şu anda The Graph Ağı'nda kabul edilen tek kriptopara GRT'dir.
6. Peşin ödeme yapmak istediğiniz ay sayısını seçin.
   - Peşin ödeme yapmak, gelecekte kullanım zorunluluğu getirmez. Yalnızca kullandığınız kadar ücretlendirilirsiniz ve bakiyenizi istediğiniz zaman çekebilirsiniz.
@@ -98,7 +99,7 @@ Bu bölüm, sorgu ücretlerini ödemek için nasıl GRT edinebileceğinizi anlat

Bu kılavuz, Coinbase üzerinden GRT satın alma işlemini adım adım açıklayacaktır.

-1. [Coinbase](https://www.coinbase.com/)'e gidin ve bir hesap oluşturun.
+1. Go to [Coinbase](https://www.coinbase.com/) and create an account.
2. Bir hesap oluşturduktan sonra, kimliğinizi KYC (Müşterini Tanı) olarak bilinen bir süreçle doğrulamanız gerekecek. KYC, tüm merkezi veya emanetçi kripto borsaları için standart bir prosedürdür.
3. Kimliğinizi doğruladıktan sonra GRT satın alabilirsiniz. Bunu sayfanın sağ üst köşesindeki "Al/Sat" düğmesine tıklayarak yapabilirsiniz.
4. Satın almak istediğiniz para birimini seçin. GRT'yi seçin.
5. Tercih ettiğiniz ödeme yöntemini seçin.
6. Satın almak istediğiniz GRT miktarını seçin.
7. Satın alımınızı gözden geçirin ve "GRT Satın Al" düğmesine tıklayın.
8. Satın alımınızı onaylayın; böylece başarılı bir şekilde GRT satın almış olacaksınız.
-9. GRT'yi hesabınızdan [MetaMask](https://metamask.io/) gibi bir cüzdana transfer edebilirsiniz.
+9. You can transfer the GRT from your account to your wallet such as [MetaMask](https://metamask.io/).
   - GRT'yi cüzdanınıza transfer etmek için, sayfanın sağ üst köşesindeki "Hesaplar" düğmesine tıklayın.
   - GRT hesabının yanındaki "Gönder" düğmesine tıklayın.
   - Göndermek istediğiniz GRT miktarını ve göndermek istediğiniz cüzdan adresini girin.
   - "Devam" düğmesine tıklayın ve işleminizi onaylayın.

-Lütfen unutmayın, yüksek tutarda satın alım durumunda Coinbase, tam tutarı bir cüzdana transfer etmeden önce 7-10 gün beklemenizi isteyebilir.
-Coinbase üzerinden GRT edinme hakkında daha fazla bilgiye [buradan](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) ulaşabilirsiniz.
+You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency).

### Binance

Bu kılavuz, Binance üzerinden GRT satın alma işlemini adım adım açıklayacaktır.

-1. [Binance](https://www.binance.com/en)e gidin ve bir hesap oluşturun.
+1. Go to [Binance](https://www.binance.com/en) and create an account.
2. Bir hesap oluşturduktan sonra, kimliğinizi KYC (Müşterini Tanı) olarak bilinen bir süreçle doğrulamanız gerekecek. KYC, tüm merkezi veya emanetçi kripto borsaları için standart bir prosedürdür.
3. Kimliğinizi doğruladıktan sonra GRT satın alabilirsiniz. Bunu, ana sayfa banner'ındaki "Şimdi Satın Al" düğmesine tıklayarak yapabilirsiniz.
4. Satın almak istediğiniz para birimini seçebileceğiniz bir sayfaya yönlendirileceksiniz. GRT'yi seçin.
5. Tercih ettiğiniz ödeme yöntemini seçin.
6. Satın almak istediğiniz GRT miktarını seçin.
7. Satın alımınızı gözden geçirin ve "GRT Satın Al" düğmesine tıklayın.
8. Satın alımınızı onaylayın, ardından GRT'nizi Binance Spot Cüzdanınızda görüntüleyebilirsiniz.
-9. GRT'yi hesabınızdan [MetaMask](https://metamask.io/) gibi bir cüzdana çekebilirsiniz.
-   - GRT'yi cüzdanınıza çekmek için cüzdan adresinizi çekim beyaz listesine ekleyin. Daha fazla bilgi için [buraya tıklayın](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570).
+9. You can withdraw the GRT from your account to your wallet such as [MetaMask](https://metamask.io/).
+   - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist.
   - "Cüzdan" düğmesine tıklayın, ardından "Çekim" seçeneğine tıklayın ve GRT'yi seçin.
   - Göndermek istediğiniz GRT miktarını ve beyaz listeye eklenmiş cüzdan adresini girin.
   - "Devam" düğmesine tıklayın ve işleminizi onaylayın.

-Binance üzerinde GRT edinme hakkında daha fazla bilgiye [buradan](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582) ulaşabilirsiniz.
+You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582).

### Uniswap

Bu kılavuz, Uniswap üzerinden GRT satın alma işlemini adım adım açıklayacaktır.

-1. [Uniswap](https://app.uniswap.org/swap?chain=arbitrum)'e gidin ve cüzdanınızı bağlayın.
+1. Go to [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) and connect your wallet.
2. Baz para birimi olarak kullanılacak token'ı seçin. ETH'yi seçin.
3. Satın almak istediğiniz token'ı seçin. GRT'yi seçin.
-   - Doğru token'a takas ettiğinizden emin olun. Arbitrum One üzerindeki GRT akıllı sözleşme adresi: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7)
+   - Make sure you're swapping for the correct token. The GRT smart contract address on Arbitrum One is: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7)
4. Takas etmek istediğiniz ETH miktarını girin.
5. "Swap" butonuna tıklayın.
6. İşlemi cüzdanınızda onaylayın ve işlemin tamamlanmasını bekleyin.

-Uniswap üzerinden GRT edinme hakkında daha fazla bilgiye [buradan](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-) ulaşabilirsiniz.
+You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-).

## Ether Edinme

Bu bölüm, işlem ücretlerini veya gas maliyetlerini ödemek için nasıl Ethe

### Coinbase

Bu kılavuz, Coinbase üzerinden ETH satın alma işlemini adım adım açıklayacaktır.

-1. [Coinbase](https://www.coinbase.com/)'e gidin ve bir hesap oluşturun.
-2. Bir hesap oluşturduktan sonra, kimliğinizi KYC (Müşterini Tanı) olarak bilinen bir süreçle doğrulamanız gerekecek. Bu, tüm merkezi veya emanetçi kripto borsaları için standart bir prosedürdür.
-3. Kimliğinizi doğruladıktan sonra, sayfanın sağ üst köşesindeki "Al/Sat" düğmesine tıklayarak ETH satın alın.
-4. Satın almak istediğiniz para birimini seçin. ETH seçin.
-5. Tercih ettiğiniz ödeme yöntemini seçin.
-6. Satın almak istediğiniz ETH miktarını girin.
-7. Satın alımınızı gözden geçirin ve "ETH Satın Al" düğmesine tıklayın.
-8. Satın alımınızı onaylayın, böylece ETH'yi başarıyla satın almış olacaksınız.
-9. ETH'yi hesabınızdan [MetaMask](https://metamask.io/) gibi bir cüzdana transfer edebilirsiniz.
-   - ETH'yi cüzdanınıza transfer etmek için, sayfanın sağ üst köşesindeki "Hesaplar" düğmesine tıklayın.
-   - ETH hesabının yanındaki "Gönder" düğmesine tıklayın.
-   - Göndermek istediğiniz ETH miktarını ve göndermek istediğiniz cüzdan adresini girin.
-   - Gönderdiğiniz adresin Arbitrum One üzerindeki Ethereum cüzdan adresiniz olduğundan emin olun.
-   - "Devam" düğmesine tıklayın ve işleminizi onaylayın.
-
-Coinbase'de ETH edinmekle alakalı daha fazla bilgiyi [buradan](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) öğrenebilirsiniz.
+1. Go to [Coinbase](https://www.coinbase.com/) and create an account.
+2. Bir hesap oluşturduktan sonra, kimliğinizi KYC (Müşterini Tanı) olarak bilinen bir süreçle doğrulamanız gerekecek. Bu, tüm merkezi veya emanetçi kripto borsaları için standart bir prosedürdür.
+3. Kimliğinizi doğruladıktan sonra, sayfanın sağ üst köşesindeki "Al/Sat" düğmesine tıklayarak ETH satın alın.
+4. Satın almak istediğiniz para birimini seçin. ETH seçin.
+5. Tercih ettiğiniz ödeme yöntemini seçin.
+6. Satın almak istediğiniz ETH miktarını girin.
+7. Satın alımınızı gözden geçirin ve "ETH Satın Al" düğmesine tıklayın.
+8. Satın alımınızı onaylayın, böylece ETH'yi başarıyla satın almış olacaksınız.
+9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/).
+   - ETH'yi cüzdanınıza transfer etmek için, sayfanın sağ üst köşesindeki "Hesaplar" düğmesine tıklayın.
+   - ETH hesabının yanındaki "Gönder" düğmesine tıklayın.
+   - Göndermek istediğiniz ETH miktarını ve göndermek istediğiniz cüzdan adresini girin.
+   - Gönderdiğiniz adresin Arbitrum One üzerindeki Ethereum cüzdan adresiniz olduğundan emin olun.
+   - "Devam" düğmesine tıklayın ve işleminizi onaylayın.
+
+You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency).

### Binance

Bu, Binance'de ETH satın almak için adım adım bir rehberdir.

-1. [Binance](https://www.binance.com/en)e gidin ve bir hesap oluşturun.
-2. Bir hesap oluşturduktan sonra, kimliğinizi KYC (Müşterini Tanı) olarak bilinen bir süreçle doğrulamanız gerekecek. Bu, tüm merkezi veya emanetçi kripto borsaları için standart bir prosedürdür.
-3. Kimliğinizi doğruladıktan sonra, ana sayfa afişindeki "Şimdi Satın Al" düğmesine tıklayarak ETH satın alın.
-4. Satın almak istediğiniz para birimini seçin. ETH seçin.
-5. Tercih ettiğiniz ödeme yöntemini seçin.
-6. Satın almak istediğiniz ETH miktarını girin.
-7. Satın alımınızı gözden geçirin ve "ETH Satın Al" düğmesine tıklayın.
-8. Satın alımınızı onaylayın ve ETH'nizi Binance Spot Cüzdanınızda görüceksiniz.
-9. ETH'yi hesabınızdan [MetaMask](https://metamask.io/) gibi bir cüzdana çekebilirsiniz.
-   - ETH'yi cüzdanınıza çekmek için cüzdan adresinizi çekim beyaz listesine ekleyin. Daha fazla bilgi için buraya tıklayın.
-   - "Cüzdan" düğmesine tıklayın, para çekme seçeneğine tıklayın ve ETH'yi seçin.
-   - Göndermek istediğiniz ETH miktarını ve göndermek istediğiniz güvenilir adresler listesindeki cüzdan adresini girin.
-   - Gönderdiğiniz adresin Arbitrum One üzerindeki Ethereum cüzdan adresiniz olduğundan emin olun.
-   - "Devam" düğmesine tıklayın ve işleminizi onaylayın.
+1. Go to [Binance](https://www.binance.com/en) and create an account.
+2. Bir hesap oluşturduktan sonra, kimliğinizi KYC (Müşterini Tanı) olarak bilinen bir süreçle doğrulamanız gerekecek. Bu, tüm merkezi veya emanetçi kripto borsaları için standart bir prosedürdür.
+3. Kimliğinizi doğruladıktan sonra, ana sayfa afişindeki "Şimdi Satın Al" düğmesine tıklayarak ETH satın alın.
+4. Satın almak istediğiniz para birimini seçin. ETH seçin.
+5. Tercih ettiğiniz ödeme yöntemini seçin.
+6. Satın almak istediğiniz ETH miktarını girin.
+7. Satın alımınızı gözden geçirin ve "ETH Satın Al" düğmesine tıklayın.
+8. Satın alımınızı onaylayın ve ETH'nizi Binance Spot Cüzdanınızda göreceksiniz.
+9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/).
+   - ETH'yi cüzdanınıza çekmek için cüzdan adresinizi çekim beyaz listesine ekleyin. Daha fazla bilgi için buraya tıklayın.
+   - "Cüzdan" düğmesine tıklayın, para çekme seçeneğine tıklayın ve ETH'yi seçin.
+   - Göndermek istediğiniz ETH miktarını ve göndermek istediğiniz güvenilir adresler listesindeki cüzdan adresini girin.
+   - Gönderdiğiniz adresin Arbitrum One üzerindeki Ethereum cüzdan adresiniz olduğundan emin olun.
+   - "Devam" düğmesine tıklayın ve işleminizi onaylayın.
- -Binance'de ETH edinmekle alakalı daha fazla bilgiyi [buradan](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582) öğrenebilirsiniz. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Bir hesap oluşturduktan sonra, kimliğinizi KYC (Müşterini Tanı) olarak bilinen bir süreçle doğrulamanız gerekecek. Bu, tüm merkezi veya emanetçi kripto borsaları için standart bir prosedürdür. +3. Kimliğinizi doğruladıktan sonra, ana sayfa afişindeki "Şimdi Satın Al" düğmesine tıklayarak ETH satın alın. +4. Satın almak istediğiniz para birimini seçin. ETH seçin. +5. Tercih ettiğiniz ödeme yöntemini seçin. +6. Satın almak istediğiniz ETH miktarını girin. +7. Satın alımınızı gözden geçirin ve "ETH Satın Al" düğmesine tıklayın. +8. Satın alımınızı onaylayın ve ETH'nizi Binance Spot Cüzdanınızda görüceksiniz. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - ETH'yi cüzdanınıza çekmek için cüzdan adresinizi çekim beyaz listesine ekleyin. Daha fazla bilgi için buraya tıklayın. + - "Cüzdan" düğmesine tıklayın, para çekme seçeneğine tıklayın ve ETH'yi seçin. + - Göndermek istediğiniz ETH miktarını ve göndermek istediğiniz güvenilir adresler listesindeki cüzdan adresini girin. + - Gönderdiğiniz adresin Arbitrum One üzerindeki Ethereum cüzdan adresiniz olduğundan emin olun. + - "Devam" düğmesine tıklayın ve işleminizi onaylayın. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Faturalandırma Hakkında SSS @@ -202,11 +203,11 @@ Kaç sorguya ihtiyacınız olacağını önceden bilmeniz gerekmez. Yalnızca ku Sorgu sayısını fazla tahmin etmenizi öneririz, böylece bakiyenizi sık sık doldurmak zorunda kalmazsınız. Küçük ve orta ölçekli uygulamalar için iyi bir başlangıç tahmini, aylık 1M-2M sorgu ile başlamak ve ilk haftalarda kullanımı yakından izlemektir. Daha büyük uygulamalar için iyi bir tahmin, sitenizin günlük ziyaret sayısını, en aktif sayfanızın açılışta yaptığı sorgu sayısı ile çarpmaktır. -Elbette, hem yeni hem de mevcut kullanıcılar, beklenen kullanım hakkında daha fazla bilgi almak için Edge & Node'un İş Geliştirme (BD) ekibiyle iletişime geçebilirler. +Of course, both new and existing users can reach out to Edge & Node's BD team for a consult to learn more about anticipated usage. ### Faturalandırma bakiyemden GRT çekebilir miyim? -Evet, faturalandırma bakiyenizde sorgularda kullanılmamış olan GRT'yi her zaman çekebilirsiniz. Faturalandırma sözleşmesi yalnızca GRT'yi Ethereum ana ağından Arbitrum ağına bağlamak için tasarlanmıştır. GRT'nizi Arbitrum'dan tekrar Ethereum ana ağına aktarmak isterseniz, [Arbitrum Köprüsü](https://bridge.arbitrum.io/?l2ChainId=42161)'nü kullanmanız gerekir. +Yes, you can always withdraw GRT that has not already been used for queries from your billing balance. The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). ### Faturalandırma bakiyem tükendiğinde ne olur? Bir uyarı alacak mıyım? 
From 20a1a05b4bcd2ab7c4b22eec22ae8d272fda4057 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:49 -0500 Subject: [PATCH 0536/1534] New translations billing.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/billing.mdx | 59 +++++++++++----------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/website/src/pages/uk/subgraphs/billing.mdx b/website/src/pages/uk/subgraphs/billing.mdx index 53be2718dd7d..ac919c79491b 100644 --- a/website/src/pages/uk/subgraphs/billing.mdx +++ b/website/src/pages/uk/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Білінг --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. 
+ - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). 
From cef2bd1e8e384c8a14844ebede456d5f44e1bb9a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:53:51 -0500
Subject: [PATCH 0537/1534] New translations billing.mdx (Chinese Simplified)

---
 website/src/pages/zh/subgraphs/billing.mdx | 63 +++++++++++-----------
 1 file changed, 32 insertions(+), 31 deletions(-)

diff --git a/website/src/pages/zh/subgraphs/billing.mdx b/website/src/pages/zh/subgraphs/billing.mdx
index f7bee3cc862f..985cc1679f23 100644
--- a/website/src/pages/zh/subgraphs/billing.mdx
+++ b/website/src/pages/zh/subgraphs/billing.mdx
@@ -2,7 +2,7 @@
 title: 计费
 ---
 
-## Subgraph Billing Plans
+## Querying Plans
 
 There are two plans to use when querying subgraphs on The Graph Network.
 
@@ -38,7 +38,8 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a
 - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges:
 
 - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161)
-- [转移到](https://transferto.xyz/swap)
+
+- [TransferTo](https://transferto.xyz/swap)
 
 - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap.
 
@@ -130,7 +131,7 @@ This will be a step by step guide for purchasing GRT on Binance.
 - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist.
 - Click on the "wallet" button, click withdraw, and select GRT.
 - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to.
-    - Click "Continue" and confirm your transaction.
+    - 单击“继续”并确认您的交易。
 
You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582).
 
@@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or
 
 This will be a step by step guide for purchasing ETH on Coinbase.
 
-1. Go to [Coinbase](https://www.coinbase.com/) and create an account.
-2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges.
-3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page.
-4. Select the currency you want to purchase. Select ETH.
-5. Select your preferred payment method.
-6. Enter the amount of ETH you want to purchase.
-7. Review your purchase and click "Buy ETH".
-8. Confirm your purchase and you will have successfully purchased ETH.
-9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/).
-    - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page.
-    - Click on the "Send" button next to the ETH account.
-    - Enter the amount of ETH you want to send and the wallet address you want to send it to.
-    - Ensure that you are sending to your Ethereum wallet address on Arbitrum One.
-    - Click "Continue" and confirm your transaction.
+1. Go to [Coinbase](https://www.coinbase.com/) and create an account.
+2. 创建账户后，您需要通过KYC（或了解您的客户）流程验证您的身份。这是所有中心化或托管加密交易所的标准程序。
+3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page.
+4. 选择要购买的货币。选择ETH。
+5. Select your preferred payment method.
+6. Enter the amount of ETH you want to purchase.
+7. Review your purchase and click "Buy ETH".
+8. Confirm your purchase and you will have successfully purchased ETH.
+9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/).
+    - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page.
+    - Click on the "Send" button next to the ETH account.
+    - Enter the amount of ETH you want to send and the wallet address you want to send it to.
+    - Ensure that you are sending to your Ethereum wallet address on Arbitrum One.
+    - 单击“继续”并确认您的交易。

You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency).

### Binance

This will be a step by step guide for purchasing ETH on Binance.

-1. Go to [Binance](https://www.binance.com/en) and create an account.
-2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges.
-3. 一旦您完成了身份验证，您可以通过在首页横幅上点击“立即购买”按钮来购买ETH。
-4. Select the currency you want to purchase. Select ETH.
-5. Select your preferred payment method.
-6. Enter the amount of ETH you want to purchase.
-7. Review your purchase and click "Buy ETH".
-8. 确认您的购买，您将能够在Binance现货钱包中看到您的ETH。
-9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/).
-    - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist.
-    - 单击“钱包”按钮，单击提取，然后选择ETH。
-    - 输入您要发送的ETH金额和您要发送到的白名单钱包地址。
-    - Ensure that you are sending to your Ethereum wallet address on Arbitrum One.
-    - Click "Continue" and confirm your transaction.
+1. Go to [Binance](https://www.binance.com/en) and create an account.
+2. 创建账户后，您需要通过KYC（或了解您的客户）流程验证您的身份。这是所有中心化或托管加密交易所的标准程序。
+3. 一旦您完成了身份验证，您可以通过在首页横幅上点击“立即购买”按钮来购买ETH。
+4. 选择要购买的货币。选择ETH。
+5. Select your preferred payment method.
+6. Enter the amount of ETH you want to purchase.
+7. Review your purchase and click "Buy ETH".
+8. 确认您的购买，您将能够在Binance现货钱包中看到您的ETH。
+9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/).
+    - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist.
+    - 单击“钱包”按钮，单击提取，然后选择ETH。
+    - 输入您要发送的ETH金额和您要发送到的白名单钱包地址。
+    - Ensure that you are sending to your Ethereum wallet address on Arbitrum One.
+    - 单击“继续”并确认您的交易。

You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582).
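The withdrawal steps in these guides all end with the same safety check: that you are sending to your wallet address on Arbitrum One. A quick way to confirm a transfer actually landed on Arbitrum is to read your balances over an Arbitrum One RPC endpoint. The sketch below is illustrative only and is not part of the translated files: it assumes ethers v6 and the public `https://arb1.arbitrum.io/rpc` endpoint, the GRT address is the Arbitrum One contract cited in the Uniswap section above, and `YOUR_WALLET_ADDRESS` is a placeholder.

```ts
// Minimal sketch (assumes ethers v6 and the public Arbitrum One RPC endpoint).
// It only reads balances, so no private key is involved.
import { Contract, JsonRpcProvider, formatEther, formatUnits } from "ethers";

const provider = new JsonRpcProvider("https://arb1.arbitrum.io/rpc"); // Arbitrum One

// GRT on Arbitrum One, as listed earlier in this guide.
const GRT_ADDRESS = "0x9623063377AD1B27544C965cCd7342f7EA7e88C7";
const erc20Abi = ["function balanceOf(address owner) view returns (uint256)"];
const grt = new Contract(GRT_ADDRESS, erc20Abi, provider);

async function checkBalances(address: string): Promise<void> {
  const eth = await provider.getBalance(address); // wei, as bigint
  const grtRaw = await grt.balanceOf(address); // GRT uses 18 decimals
  console.log(`ETH on Arbitrum One: ${formatEther(eth)}`);
  console.log(`GRT on Arbitrum One: ${formatUnits(grtRaw, 18)}`);
}

checkBalances("YOUR_WALLET_ADDRESS").catch(console.error); // placeholder address
```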
From 1144a3281cfe391bf3cc9d5f4eed78a89c6e5707 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:53:52 -0500
Subject: [PATCH 0538/1534] New translations billing.mdx (Urdu (Pakistan))

---
 website/src/pages/ur/subgraphs/billing.mdx | 69 +++++++++++-----------
 1 file changed, 35 insertions(+), 34 deletions(-)

diff --git a/website/src/pages/ur/subgraphs/billing.mdx b/website/src/pages/ur/subgraphs/billing.mdx
index bc78cfadd367..f7f5c848204d 100644
--- a/website/src/pages/ur/subgraphs/billing.mdx
+++ b/website/src/pages/ur/subgraphs/billing.mdx
@@ -2,7 +2,7 @@
 title: بلنگ
 ---
 
-## Subgraph Billing Plans
+## Querying Plans
 
 There are two plans to use when querying subgraphs on The Graph Network.
 
@@ -38,7 +38,8 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a
 - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges:
 
 - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161)
-- [کی طرف بھیجنا](https://transferto.xyz/swap)
+
+- [TransferTo](https://transferto.xyz/swap)
 
 - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap.
 
@@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or
 
 This will be a step by step guide for purchasing ETH on Coinbase.
 
-1. Go to [Coinbase](https://www.coinbase.com/) and create an account.
-2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges.
-3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page.
-4. Select the currency you want to purchase. Select ETH.
-5. Select your preferred payment method.
-6. Enter the amount of ETH you want to purchase.
-7. Review your purchase and click "Buy ETH".
-8. Confirm your purchase and you will have successfully purchased ETH.
-9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/).
-   - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page.
-   - Click on the "Send" button next to the ETH account.
-   - Enter the amount of ETH you want to send and the wallet address you want to send it to.
-   - Ensure that you are sending to your Ethereum wallet address on Arbitrum One.
-   - Click "Continue" and confirm your transaction.
+1. Go to [Coinbase](https://www.coinbase.com/) and create an account.
+2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges.
+3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page.
+4. Select the currency you want to purchase. Select ETH.
+5. Select your preferred payment method.
+6. Enter the amount of ETH you want to purchase.
+7. Review your purchase and click "Buy ETH".
+8. Confirm your purchase and you will have successfully purchased ETH.
+9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/).
+   - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page.
+   - Click on the "Send" button next to the ETH account.
+   - Enter the amount of ETH you want to send and the wallet address you want to send it to.
+   - Ensure that you are sending to your Ethereum wallet address on Arbitrum One.
+   - Click "Continue" and confirm your transaction.

You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency).

### Binance

یہ بائنینس پر ایتھیریم کی خریداری کے لیے مرحلہ وار گائیڈ ہوگا.

-1. Go to [Binance](https://www.binance.com/en) and create an account.
-2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges.
-3. اپنی شناخت کی تصدیق کرنے کے بعد، ہوم پیج بینر پر "اب خریدیں" بٹن پر کلک کرکے ایتھیریم خریدیں.
-4. وہ کرنسی منتخب کریں جسے آپ خریدنا چاہتے ہیں۔ ایتھیریم کو منتخب کریں.
-5. Select your preferred payment method.
-6. Enter the amount of ETH you want to purchase.
-7. Review your purchase and click "Buy ETH".
-8. اپنی خریداری کی تصدیق کریں اور آپ اپنے بائننس اسپاٹ والیٹ میں اپنا ایتھیریم دیکھیں گے.
-9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/).
-   - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist.
-   - "والیٹ" بٹن پر کلک کریں، واپس لینے پر کلک کریں، اور ایتھیریم کو منتخب کریں.
-   - ایتھیریم کی وہ رقم درج کریں جسے آپ بھیجنا چاہتے ہیں اور وائٹ لسٹ شدہ والیٹ ایڈریس جس پر آپ اسے بھیجنا چاہتے ہیں.
-   - Ensure that you are sending to your Ethereum wallet address on Arbitrum One.
-   - Click "Continue" and confirm your transaction.
-
-آپ بائنینس پر ایتھیریم حاصل کرنے کے بارے میں مزید جان سکتے ہیں [یہاں ](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582).
+1. Go to [Binance](https://www.binance.com/en) and create an account.
+2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges.
+3. اپنی شناخت کی تصدیق کرنے کے بعد، ہوم پیج بینر پر "اب خریدیں" بٹن پر کلک کرکے ایتھیریم خریدیں.
+4. Select the currency you want to purchase. Select ETH.
+5. Select your preferred payment method.
+6. Enter the amount of ETH you want to purchase.
+7. Review your purchase and click "Buy ETH".
+8. اپنی خریداری کی تصدیق کریں اور آپ اپنے بائننس اسپاٹ والیٹ میں اپنا ایتھیریم دیکھیں گے.
+9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/).
+   - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist.
+   - "والیٹ" بٹن پر کلک کریں، واپس لینے پر کلک کریں، اور ایتھیریم کو منتخب کریں.
+   - ایتھیریم کی وہ رقم درج کریں جسے آپ بھیجنا چاہتے ہیں اور وائٹ لسٹ شدہ والیٹ ایڈریس جس پر آپ اسے بھیجنا چاہتے ہیں.
+   - Ensure that you are sending to your Ethereum wallet address on Arbitrum One.
+   - Click "Continue" and confirm your transaction.
+
+You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582).
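Several of the steps above assume your wallet already knows about Arbitrum One. If the network is missing from MetaMask, it can be added programmatically with the standard `wallet_addEthereumChain` request (EIP-3085). The snippet below is a sketch for a browser page with an injected wallet; the chain parameters are the commonly published Arbitrum One values, so verify them against official sources before relying on them.

```ts
// Sketch: ask an injected wallet (e.g. MetaMask) to add Arbitrum One via EIP-3085.
// Parameter values are assumptions here — check official Arbitrum documentation.
async function addArbitrumOne(): Promise<void> {
  const ethereum = (window as any).ethereum;
  if (!ethereum) throw new Error("No injected wallet found");

  await ethereum.request({
    method: "wallet_addEthereumChain",
    params: [
      {
        chainId: "0xa4b1", // 42161 in hex
        chainName: "Arbitrum One",
        nativeCurrency: { name: "Ether", symbol: "ETH", decimals: 18 },
        rpcUrls: ["https://arb1.arbitrum.io/rpc"],
        blockExplorerUrls: ["https://arbiscan.io"],
      },
    ],
  });
}

addArbitrumOne().catch(console.error);
```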
## Billing FAQs From 67162ed3b856eda8701e5df9ddcfb5fc279bc320 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:53 -0500 Subject: [PATCH 0539/1534] New translations billing.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/billing.mdx | 59 +++++++++++----------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/website/src/pages/vi/subgraphs/billing.mdx b/website/src/pages/vi/subgraphs/billing.mdx index a88a51c3adff..c9f380bb022c 100644 --- a/website/src/pages/vi/subgraphs/billing.mdx +++ b/website/src/pages/vi/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Billing --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. 
+ - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). 
From ce63aee70c0b3f13f3f5b069f6c38ac0b9ccfe09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:54 -0500 Subject: [PATCH 0540/1534] New translations billing.mdx (Marathi) --- website/src/pages/mr/subgraphs/billing.mdx | 59 +++++++++++----------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/website/src/pages/mr/subgraphs/billing.mdx b/website/src/pages/mr/subgraphs/billing.mdx index 1d3221e488ee..7126ce22520f 100644 --- a/website/src/pages/mr/subgraphs/billing.mdx +++ b/website/src/pages/mr/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: Billing --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -38,6 +38,7 @@ To pay for queries, you need GRT on Arbitrum. Here are a few different ways to a - If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. 
+ - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). 
From 3d06c9d97f20023068508901461c3e5ce0024eae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:55 -0500 Subject: [PATCH 0541/1534] New translations billing.mdx (Hindi) --- website/src/pages/hi/subgraphs/billing.mdx | 67 +++++++++++----------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/website/src/pages/hi/subgraphs/billing.mdx b/website/src/pages/hi/subgraphs/billing.mdx index f32654675eb6..73a12dc0644c 100644 --- a/website/src/pages/hi/subgraphs/billing.mdx +++ b/website/src/pages/hi/subgraphs/billing.mdx @@ -2,7 +2,7 @@ title: बिलिंग --- -## Subgraph Billing Plans +## Querying Plans There are two plans to use when querying subgraphs on The Graph Network. @@ -31,20 +31,21 @@ Subgraph users can use The Graph Token (or GRT) to pay for queries on The Graph ### GRT on Arbitrum or Ethereum -The Graph का बिलिंग सिस्टम Arbitrum पर GRT को स्वीकार करता है, और उपयोगकर्ताओं को गैस के भुगतान के लिए Arbitrum पर ETH की आवश्यकता होगी। जबकि The Graph प्रोटोकॉल Ethereum Mainnet पर शुरू हुआ, सभी गतिविधियाँ, जिसमें बिलिंग कॉन्ट्रैक्ट्स भी शामिल हैं, अब Arbitrum One पर हैं। +The Graph का बिलिंग सिस्टम Arbitrum पर GRT को स्वीकार करता है, और उपयोगकर्ताओं को गैस के भुगतान के लिए Arbitrum पर ETH की आवश्यकता होगी। जबकि The Graph प्रोटोकॉल Ethereum Mainnet पर शुरू हुआ, सभी गतिविधियाँ, जिसमें बिलिंग कॉन्ट्रैक्ट्स भी शामिल हैं, अब Arbitrum One पर हैं। क्वेरियों के लिए भुगतान करने के लिए, आपको Arbitrum पर GRT की आवश्यकता है। इसे प्राप्त करने के लिए कुछ विभिन्न तरीके यहां दिए गए हैं: -- यदि आपके पास पहले से Ethereum पर GRT है, तो आप इसे Arbitrum पर ब्रिज कर सकते हैं। आप यह Subgraph Studio में प्रदान किए गए GRT ब्रिजिंग विकल्प के माध्यम से या निम्नलिखित में से किसी एक ब्रिज का उपयोग करके कर सकते हैं: +- यदि आपके पास पहले से Ethereum पर GRT है, तो आप इसे Arbitrum पर ब्रिज कर सकते हैं। आप यह Subgraph Studio में प्रदान किए गए GRT ब्रिजिंग विकल्प के माध्यम से या निम्नलिखित में से किसी एक ब्रिज का उपयोग करके कर सकते हैं: - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) - यदि आपके पास पहले से Arbitrum पर संपत्तियाँ हैं, तो आप उन्हें Uniswap जैसे स्वैपिंग प्रोटोकॉल के माध्यम से GRT के लिए स्वैप कर सकते हैं। - वैकल्पिक रूप से, आप एक विकेंद्रीकृत एक्सचेंज के माध्यम से Arbitrum पर सीधे GRT प्राप्त कर सकते हैं। -> यह अनुभाग इस धारणा के तहत लिखा गया है कि आपके पास पहले से ही अपने वॉलेट में **GRT** है, और आप **Arbitrum** पर हैं। यदि आपके पास **GRT** नहीं है, तो आप **यहाँ** [जान सकते हैं कि GRT कैसे प्राप्त करें](#getting-grt)। +> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). एक बार जब आप GRT को ब्रिज करते हैं, तो आप इसे अपने बिलिंग बैलेंस में जोड़ सकते हैं। @@ -127,7 +128,7 @@ This will be a step by step guide for purchasing GRT on Binance. 7. Review your purchase and click "Buy GRT". 8. Confirm your purchase and you will be able to see your GRT in your Binance Spot Wallet. 9. You can withdraw the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). - - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) GRT को अपने वॉलेट में, निकासी श्वेतसूची में अपने वॉलेट का पता जोड़ें। + - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist. 
- Click on the "wallet" button, click withdraw, and select GRT. - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to. - Click "Continue" and confirm your transaction. @@ -156,20 +157,20 @@ This section will show you how to get Ether (ETH) to pay for transaction fees or This will be a step by step guide for purchasing ETH on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). @@ -177,20 +178,20 @@ You can learn more about getting ETH on Coinbase [here](https://help.coinbase.co This will be a step by step guide for purchasing ETH on Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. 
Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. + - Click "Continue" and confirm your transaction. You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). From 345d6967f55776ff516ece3264587aa915bd30c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:56 -0500 Subject: [PATCH 0542/1534] New translations arweave.mdx (Romanian) --- .../pages/ro/subgraphs/cookbook/arweave.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/arweave.mdx b/website/src/pages/ro/subgraphs/cookbook/arweave.mdx index 2098d5ab5932..2372025621d1 100644 --- a/website/src/pages/ro/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/arweave.mdx @@ -92,9 +92,9 @@ Arweave data sources support two types of handlers: - `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. - +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. 
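To make the two handler kinds concrete, the sketch below shows the rough shape of a transaction handler. It is an illustration only, not code from the example subgraph: the Arweave `Transaction` type and the generated `TransactionEntity` class are assumptions (the real import paths come from `@graphprotocol/graph-ts` and `graph codegen`, so take them from the example subgraph linked later on this page), and only fields documented on the `Transaction` type are used.

```tsx
// Illustrative sketch of a transaction handler's shape. Imports are omitted
// deliberately: `Transaction` and `TransactionEntity` are assumed names —
// copy the real imports and generated entity classes from the example subgraph.
export function handleTx(tx: Transaction): void {
  // `id`, `owner` and `fee` are fields on the documented Transaction type.
  let entity = new TransactionEntity(tx.id)
  entity.owner = tx.owner
  entity.fee = tx.fee
  entity.save()
}
```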
## Schema Definition

@@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe

```
const base64Alphabet = [
  "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
  "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
  "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
  "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
  "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/"
];

const base64UrlAlphabet = [
  "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
  "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
  "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
  "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
  "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_"
];

function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string {
  let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet;

  let result = '', i: i32, l = bytes.length;
  for (i = 2; i < l; i += 3) {
    result += alphabet[bytes[i - 2] >> 2];
    result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)];
    result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)];
    result += alphabet[bytes[i] & 0x3F];
  }
  if (i === l + 1) { // 1 octet yet to write
    result += alphabet[bytes[i - 2] >> 2];
    result += alphabet[(bytes[i - 2] & 0x03) << 4];
    if (!urlSafe) {
      result += "==";
    }
  }
  if (!urlSafe && i === l) { // 2 octets yet to write
    result += alphabet[bytes[i - 2] >> 2];
    result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)];
    result += alphabet[(bytes[i - 1] & 0x0F) << 2];
    if (!urlSafe) {
      result += "=";
    }
  }
  return result;
}
```

From 76fbe310679ea2859597c74dc232194fad88472d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:53:57 -0500
Subject: [PATCH 0543/1534] New translations arweave.mdx (French)

---
 .../pages/fr/subgraphs/cookbook/arweave.mdx   | 124 +++++++++---------
 1 file changed, 62 insertions(+), 62 deletions(-)

diff --git a/website/src/pages/fr/subgraphs/cookbook/arweave.mdx b/website/src/pages/fr/subgraphs/cookbook/arweave.mdx
index b399d29205ce..2b11f5ea02a1 100644
--- a/website/src/pages/fr/subgraphs/cookbook/arweave.mdx
+++ b/website/src/pages/fr/subgraphs/cookbook/arweave.mdx
@@ -13,36 +13,36 @@ Arweave est un protocole qui permet aux développeurs de stocker des données de
 Arweave a déjà construit de nombreuses bibliothèques pour intégrer le protocole dans plusieurs langages de programmation différents. Pour plus d'informations, vous pouvez consulter :

- [Arwiki](https://arwiki.wiki/#/en/main)
-- [Ressources d'Arweave](https://www.arweave.org/build)
+- [Arweave Resources](https://www.arweave.org/build)

-## À quoi servent les subgraphes d'Arweave ?
+## À quoi servent les subgraphs d'Arweave ?

-The Graph vous permet de créer des API ouvertes personnalisées appelées « subgraphes ». Les subgraphes sont utilisés pour indiquer aux indexeurs (gestionnaires de serveur) les données à indexer sur une blockchain et à enregistrer sur leurs serveurs afin que vous puissiez les interroger à tout moment à l'aide de [GraphQL](https://graphql.org/).
+The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/).

-[Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable d'indexer les données sur le protocole Arweave. L'intégration actuelle indexe uniquement Arweave en tant que blockchain (blocs et transactions), elle n'indexe pas encore les fichiers stockés.
+[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet.

## Construire un subgraph Arweave

Pour pouvoir créer et déployer des Arweave Subgraphs, vous avez besoin de deux packages :

-1. Les versions supérieures à 0.30.2 du `@graphprotocol/graph-cli` - Il s'agit d'un outil caractérisé par l'utilisation de lignes de commandes pour construire et déployer des subgraphes. Cliquez [ici](https://www.npmjs.com/package/@graphprotocol/graph-cli) pour le télécharger en utilisant `npm`.
-2. 
`@graphprotocol/graph-ts` version supérieure à 0.27.0 - Il s'agit d'une bibliothèque de types spécifiques aux subgraphs. [Cliquez ici](https://www.npmjs.com/package/@graphprotocol/graph-ts) pour télécharger en utilisant `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Caractéristique des subgraphs Il y a trois composants d'un subgraph : -### 1. Manifeste - `subgraph.yaml` +### 1. Manifest - `subgraph.yaml` Définit les sources de données intéressantes et la manière dont elles doivent être traitées. Arweave est un nouveau type de source de données. -### 2. Schéma - `schema.graphql` +### 2. Schema - `schema.graphql` Vous définissez ici les données que vous souhaitez pouvoir interroger après avoir indexé votre subgraph à l'aide de GraphQL. Ceci est en fait similaire à un modèle pour une API, où le modèle définit la structure d'un corps de requête. -Les exigences relatives aux subgraphs Arweave sont couvertes par la [documentation existante](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. Mappages AssemblyScript - `mapping.ts` +### 3. AssemblyScript Mappings - `mapping.ts` Il s'agit de la logique qui détermine comment les données doivent être récupérées et stockées lorsqu'une personne interagit avec les sources de données que vous interrogez. Les données sont traduites et stockées sur la base du schema que vous avez répertorié. @@ -55,7 +55,7 @@ $ graph build # génère le Web Assembly à partir des fichiers AssemblyScript, ## Définition du manifeste du subgraph -Le manifeste du subgraph `subgraph.yaml` identifie les sources de données pour le subgraph, les déclencheurs d'intérêt, et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Ci-dessous un exemple de manifeste pour un subgraph visant Arweave : +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersion: 0.0.5 @@ -82,28 +82,28 @@ dataSources: - handler: handleTx # le nom de la fonction dans le fichier de mapping ``` -- Les subgraphs Arweave introduisent un nouveau type de source de données (`arweave`) +- Arweave subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Les sources de données Arweave introduisent un champ source.owner facultatif, qui est la clé publique d'un portefeuille Arweave Les sources de données Arweave prennent en charge deux types de gestionnaires : -- `blockHandlers` - Exécuté sur chaque nouveau bloc Arweave. Aucun source.owner n'est requis. -- `transactionHandlers` : exécuté sur chaque transaction dont le `source.owner` de la source de données est le propriétaire. Actuellement, un propriétaire est requis pour les `transactionHandlers`. 
Si les utilisateurs souhaitent traiter toutes les transactions, ils doivent fournir "" comme `source.owner` +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > Source.owner peut être l’adresse du propriétaire ou sa clé publique. - +> > Les transactions sont les éléments constitutifs du permaweb Arweave et ce sont des objets créés par les utilisateurs finaux. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. -## Définition d'un schéma +## Définition de schéma -La définition du schéma décrit la structure de la base de données de subgraphs résultante et les relations entre les entités. Ceci est indépendant de la source de données d’origine. Vous trouverez plus de détails sur la définition du schéma de subgraph [ici](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## Cartographies AssemblyScript -Les gestionnaires pour le traitement des événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -146,11 +146,11 @@ class Transaction { } ``` -Les gestionnaires de blocs reçoivent un `Block`, tandis que les transactions reçoivent un `Transaction`. +Block handlers receive a `Block`, while transactions receive a `Transaction`. -L'écriture des mappages d'un subgraph Arweave est très similaire à l'écriture des mappages d'un subgraph Ethereum. Pour plus d'informations, cliquez [ici](/developing/creating-a-subgraph/#writing-mappings). +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph in Subgraph Studio +## Déploiement d'un subgraph Arweave dans Subgraph Studio Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. @@ -160,15 +160,15 @@ graph deploy --access-token ## Interroger un subgraph d'Arweave -Le endpoint GraphQL pour les subgraphs d'Arweave est déterminé par la définition du schema, avec l'interface API existante. Veuillez consulter la [documentation de l'API GraphQL](/subgraphs/querying/graphql-api/) pour plus d'informations. +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
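As a concrete sketch, a query against an Arweave subgraph that indexes blocks might look like the following. Note that the `blocks` entity and its fields are assumptions here; the names you can actually query depend entirely on the schema you define:

```graphql
# Hypothetical entity and field names; adjust to match your own schema.graphql
{
  blocks(first: 5, orderBy: height, orderDirection: desc) {
    id
    height
    timestamp
  }
}
```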
## Exemples de subgraphs Voici un exemple de modèle subgraph : -- [Exemple de subgraph pour Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## Questions fréquemment posées +## FAQ ### Un subgraph peut-il indexer Arweave et d'autres chaînes ? @@ -188,52 +188,52 @@ La source.owner peut être la clé publique de l'utilisateur ou l'adresse de son ### Quel est le format de chiffrement actuel ? -Les données sont généralement transmises dans les mappages sous forme d'octets, qui, s'ils sont stockés directement, sont renvoyés dans le subgraph au format `hex` (ex. hachages de bloc et de transaction). Vous souhaiterez peut-être convertir vos mappages en un format `base64` ou `base64 URL` sécurisé, afin de correspondre à ce qui est affiché dans les explorateurs de blocs comme [Explorateur Arweave](https : //viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). -La fonction d'assistance `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` suivante peut être utilisée et sera ajoutée à `graph-ts` : +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From 5df8ec9863448315422aafc240b2798bdfa1ba70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:58 -0500 Subject: [PATCH 0544/1534] New translations arweave.mdx (Spanish) --- .../pages/es/subgraphs/cookbook/arweave.mdx | 124 +++++++++--------- 1 file changed, 62 insertions(+), 62 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/arweave.mdx b/website/src/pages/es/subgraphs/cookbook/arweave.mdx index 645fae5f5783..c0333e3dadf8 100644 --- a/website/src/pages/es/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/es/subgraphs/cookbook/arweave.mdx @@ -13,36 +13,36 @@ El protocolo Arweave permite a los developers almacenar datos de forma permanent Arweave ya ha construido numerosas bibliotecas para integrar el protocolo en varios lenguajes de programación. Para más información puede consultar: - [Arwiki](https://arwiki.wiki/#/en/main) -- [Recursos de Arweave](https://www.arweave.org/build) +- [Arweave Resources](https://www.arweave.org/build) ## ¿Qué son los subgrafos Arweave? -The Graph te permite crear API abiertas personalizadas llamadas "subgrafos". Los Subgrafos se utilizan para indicar a los Indexadores (operadores de servidores) qué datos indexar en una blockchain y guardar en sus servidores para que puedas consultarlos en cualquier momento usando [GraphQL](https://graphql.org/). +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) ahora puede indexar datos en el protocolo Arweave. La integración actual solo indexa Arweave como una blockchain (bloques y transacciones), aún no indexa los archivos almacenados. 
+[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. ## Construcción de un subgrafo Arweave Para poder construir y deployar subgrafos Arweave, necesita dos paquetes: -1. `@graphprotocol/graph-cli` versión anterior 0.30.2: esta es una herramienta de línea de comandos para crear e implementar subgrafos. [Haga clic aquí](https://www.npmjs.com/package/@graphprotocol/graph-cli) para descargar usando `npm`. -2. `@graphprotocol/graph-ts` versión anterior 0.27.0: esta es una biblioteca de tipos específicos de subgrafos. [Haga clic aquí](https://www.npmjs.com/package/@graphprotocol/graph-ts) para descargar usando `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Componentes del subgrafo Hay tres componentes de un subgrafo: -### 1. Manifiesto - `subgraph.yaml` +### 1. Manifest - `subgraph.yaml` Define las fuentes de datos de interés y cómo deben ser procesadas. Arweave es un nuevo tipo de fuente de datos. -### 2. Esquema - `schema.graphql` +### 2. Schema - `schema.graphql` Aquí defines qué datos quieres poder consultar después de indexar tu Subgrafo usando GraphQL. Esto es en realidad similar a un modelo para una API, donde el modelo define la estructura de un cuerpo de solicitud. -Los requisitos para los subgrafos de Arweave están cubiertos por la [documentación existente](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### Asignaciones de AssemblyScript - `mapping.ts` +### 3. AssemblyScript Mappings - `mapping.ts` Esta es la lógica que determina cómo los datos deben ser recuperados y almacenados cuando alguien interactúa con las fuentes de datos que estás escuchando. Los datos se traducen y se almacenan basándose en el esquema que has listado. @@ -55,7 +55,7 @@ $ graph build # genera Web Assembly a partir de los archivos de AssemblyScript y ## Definición de manifiesto del subgrafo -El manifiesto del subgrafo `subgraph.yaml` identifica las fuentes de datos para el subgrafo, los disparadores de interés y las funciones que deben ejecutarse en respuesta a esos disparadores. A continuación se muestra un ejemplo de manifiesto de subgrafos para un subgrafo de Arweave: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersion: 0.0.5 @@ -82,28 +82,28 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Los subgrafos de Arweave introducen un nuevo tipo de fuente de datos (`arweave`) +- Arweave subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. 
In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Las fuentes de datos de Arweave introducen un campo opcional "source.owner", que es la clave pública de una billetera Arweave Las fuentes de datos de Arweave admiten dos tipos de handlers: -- `blockHandlers` - Ejecutar en cada nuevo bloque de Arweave. No se requiere source.owner. -- `transactionHandlers`: se ejecuta en cada transacción en la que el `source.owner` de la fuente de datos es el propietario. Actualmente se requiere un propietario para `transactionHandlers`, si los usuarios desean procesar todas las transacciones deben proporcionar "" como `source.owner` +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > El source.owner puede ser la dirección del propietario o su clave pública. - +> > Las transacciones son los bloques de construcción de la permaweb de Arweave y son objetos creados por los usuarios finales. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. -## Definición del esquema +## Definición de esquema -La definición de esquema describe la estructura de la base de datos de subgrafos resultante y las relaciones entre las entidades. Esto es independiente de la fuente de datos original. Hay más detalles sobre la definición del esquema de subgrafo [aquí](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). -## Mappings de AssemblyScript +## Asignaciones de AssemblyScript -Los handlers para procesar eventos están escritos en [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -146,9 +146,9 @@ class Transaction { } ``` -Los handlers de bloques reciben un `Block`, mientras que las transacciones reciben una `Transaction`. +Block handlers receive a `Block`, while transactions receive a `Transaction`. -Escribir los mappings de un subgrafo de Arweave es muy similar a escribir los mappings de un subgrafo de Ethereum. Para obtener más información, haz clic [aquí](/developing/creating-a-subgraph/#write-mappings). +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). ## Deploying an Arweave Subgraph in Subgraph Studio @@ -160,15 +160,15 @@ graph deploy --access-token ## Consultando un subgrafo de Arweave -El endpoint de GraphQL para los subgrafos de Arweave está determinado por la definición del esquema, con la interfaz API existente. Visita la [documentación de la API de GraphQL](/subgraphs/querying/graphql-api/) para obtener más información. +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
-## Ejemplos de subgrafos +## Subgrafos de ejemplo A continuación se muestra un ejemplo de subgrafo como referencia: -- [Ejemplo de subgrafo para Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## Preguntas frecuentes +## FAQ ### ¿Puede un subgrafo indexar Arweave y otras cadenas? @@ -188,52 +188,52 @@ El source.owner puede ser la clave pública del usuario o la dirección de la cu ### ¿Cuál es el formato actual de encriptación? -Los datos generalmente se pasan a los mappings como Bytes, que si se almacenan directamente se devuelven en el subgrafo en un formato `hex` (por ejemplo, hash de bloque y transacción). Puedes querer convertir tus asignaciones a un formato seguro `base64` o `base64 URL` para que coincida con lo que se muestra en los exploradores de bloques como [Explorador de Arweave](https: //viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). -Se puede usar la siguiente función auxiliar `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` y se agregará a `graph-ts`: +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From 5d874b8e7891fcb08410e55f99c609241b9cfc40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:53:59 -0500 Subject: [PATCH 0545/1534] New translations arweave.mdx (Arabic) --- .../pages/ar/subgraphs/cookbook/arweave.mdx | 82 +++++++++---------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/arweave.mdx b/website/src/pages/ar/subgraphs/cookbook/arweave.mdx index e2b5d3a37e04..c1ec421993b4 100644 --- a/website/src/pages/ar/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/arweave.mdx @@ -53,7 +53,7 @@ $ graph codegen # generates types from the schema file identified in the manifes $ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder ``` -## تعريف بيان الرسم البياني الفرعي +## تعريف Subgraph Manifest The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: @@ -92,18 +92,18 @@ Arweave data sources support two types of handlers: - `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. - +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. ## تعريف المخطط Schema definition describes the structure of the resulting subgraph database and the relationships between entities. 
This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). -## أسيمبلي سكريبت التعيينات +## AssemblyScript Mappings -تمت كتابة المعالجات الخاصة بمعالجة الأحداث بـ[ أسيمبلي سكريبت ](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -168,7 +168,7 @@ Here is an example subgraph for reference: - [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## الأسئلة الشائعة +## FAQ ### Can a subgraph index Arweave and other chains? @@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From c93044b8d3b16c27d75488a85c48adfb256b9123 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:00 -0500 Subject: [PATCH 0546/1534] New translations arweave.mdx (Czech) --- .../pages/cs/subgraphs/cookbook/arweave.mdx | 124 +++++++++--------- 1 file changed, 62 insertions(+), 62 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/arweave.mdx b/website/src/pages/cs/subgraphs/cookbook/arweave.mdx index b9156b1c40a2..d59897ad4e03 100644 --- a/website/src/pages/cs/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Vytváření podgrafů na Arweave --- -> Podpora Arweave v uzel grafu a v podgraf Studio je ve fázi beta: s případnými dotazy ohledně vytváření podgrafů Arweave se na nás obraťte na [Discord](https://discord.gg/graphprotocol)! +> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! V této příručce se dozvíte, jak vytvořit a nasadit subgrafy pro indexování blockchainu Arweave. @@ -13,20 +13,20 @@ Protokol Arweave umožňuje vývojářům ukládat data trvale a to je hlavní r Společnost Arweave již vytvořila řadu knihoven pro integraci protokolu do řady různých programovacích jazyků. Další informace naleznete zde: - [Arwiki](https://arwiki.wiki/#/en/main) -- [Zdroje Arweave](https://www.arweave.org/build) +- [Arweave Resources](https://www.arweave.org/build) ## Co jsou podgrafy Arweave? -Graf umožňuje vytvářet vlastní otevřené rozhraní API zvané "podgrafy". Subgrafy slouží k tomu, aby indexerům (provozovatelům serverů) sdělily, která data mají indexovat v blockchainu a uložit na svých serverech, abyste se na ně mohli kdykoli dotazovat pomocí [GraphQL](https://graphql.org/). +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) nyní umí indexovat data na protokolu Arweave. Současná integrace indexuje pouze Arweave jako blockchain (bloky a transakce), zatím neindexuje uložené soubory. +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. ## Vytvoření podgrafu Arweave Abyste mohli sestavit a nasadit Arweave Subgraphs, potřebujete dva balíčky: -1. 
`@graphprotocol/graph-cli` nad verzí 0.30.2 - Jedná se o nástroj příkazového řádku pro sestavování a nasazování subgrafů. [Klikněte sem](https://www.npmjs.com/package/@graphprotocol/graph-cli) a stáhněte si pomocí `npm`. -2. `@graphprotocol/graph-ts` nad verzí 0.27.0 – Toto je knihovna typů specifických pro podgrafy. [Klikněte sem](https://www.npmjs.com/package/@graphprotocol/graph-ts) a stáhněte si pomocí `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Komponenty podgrafu @@ -40,7 +40,7 @@ Definuje zdroje dat, které jsou předmětem zájmu, a způsob jejich zpracován Zde definujete, na která data se chcete po indexování subgrafu pomocí jazyka GraphQL dotazovat. Je to vlastně podobné modelu pro API, kde model definuje strukturu těla požadavku. -Požadavky na podgrafy Arweave jsou popsány v [existující dokumentaci](/vývoj/vytvoření-podgrafu/#the-graphql-schema). +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. AssemblyScript Mappings - `mapping.ts` @@ -55,7 +55,7 @@ $ graph build # vygeneruje webové sestavení ze souborů AssemblyScript a přip ## Definice podgrafu Manifest -Manifest podgrafu `subgraph.yaml` identifikuje zdroje dat pro podgraf, zajímavé spouštěče a funkce, které by měly být spuštěny v reakci na tyto spouštěče. Příklad manifestu podgrafu pro podgraf Arweave naleznete níže: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersion: 0.0.5 @@ -82,28 +82,28 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Podgrafy Arweave zavádějí nový druh datového zdroje (`arweave`) -- Síť by měla odpovídat síti v hostitelském uzlu Graf. V aplikaci Podgraf Studio je hlavní síť Arweave `arweave-mainnet` +- Arweave subgraphs introduce a new kind of data source (`arweave`) +- The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Zdroje dat Arweave obsahují nepovinné pole source.owner, což je veřejný klíč peněženky Arweave Datové zdroje Arweave podporují dva typy zpracovatelů: -- `blockHandlers` - Spustí se při každém novém bloku Arweave. Není vyžadován source.owner. -- `transactionHandlers` - Spustí se u každé transakce, jejíž vlastníkem je `source.owner` zdroje dat. V současné době je pro `transactionHandlers` vyžadován vlastník, pokud uživatelé chtějí zpracovávat všechny transakce, měli by jako `source.owner` uvést "" +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > Source.owner může být adresa vlastníka nebo jeho veřejný klíč. - +> > Transakce jsou stavebními kameny permaweb Arweave a jsou to objekty vytvořené koncovými uživateli. 
+> +> Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. -> Poznámka: [Irys (dříve Bundlr)](https://irys.xyz/) transakce zatím nejsou podporovány. - -## Schema definice +## Definice schématu -Definice schématu popisuje strukturu výsledné databáze podgrafu a vztahy mezi entitami. Toto je nezávislé na původním zdroji dat. Více podrobností o definici schématu podgrafu naleznete [zde](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). -## AssemblyScript Mappings +## AssemblyScript Mapování -Obslužné programy pro zpracování událostí jsou napsány v jazyce [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -146,13 +146,13 @@ class Transaction { } ``` -Obsluhy bloků obdrží `Block`, zatímco transakce obdrží `Transaction`. +Block handlers receive a `Block`, while transactions receive a `Transaction`. -Zápis mapování podgrafu Arweave je velmi podobný psaní mapování podgrafu Ethereum. Další informace získáte kliknutím [sem](/developing/creating-a-subgraph/#writing-mappings). +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). ## Nasazení podgrafu Arweave v Podgraf Studio -Jakmile je podgraf vytvořen na ovládacím panelu Podgraf Studio, můžete jej nasadit pomocí příkazu `graph deploy` CLI. +Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --access-token @@ -160,13 +160,13 @@ graph deploy --access-token ## Dotazování podgrafu Arweave -Koncový bod GraphQL pro podgrafy Arweave je určen definicí schématu se stávajícím rozhraním API. Další informace naleznete v [dokumentaci rozhraní GraphQL API](/subgraphs/querying/graphql-api/). +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. -## Příklad podgrafů +## Příklady podgrafů Zde je příklad podgrafu pro referenci: -- [Příklad podgrafu pro Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ @@ -188,52 +188,52 @@ Source.owner může být veřejný klíč uživatele nebo adresa účtu. ### Jaký je aktuální formát šifrování? -Data jsou obvykle předávána do mapování jako byty (Bytes), které jsou, pokud jsou uloženy přímo, vráceny v podgrafu ve formátu `hex` (např. hashe bloků a transakcí). Mohlo by být vhodné převést je do formátu `base64` nebo `base64 URL`-safe ve vašich mapovacích funkcích, aby odpovídaly tomu, co je zobrazeno v prohlížečích bloků, například v [Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. 
block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). -Lze použít následující pomocnou funkci `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string`, která bude přidána do `graph-ts`: +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From b9abf045217c6a3cc9ec74c9b225adae47a77add Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:01 -0500 Subject: [PATCH 0547/1534] New translations arweave.mdx (German) --- .../pages/de/subgraphs/cookbook/arweave.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/arweave.mdx b/website/src/pages/de/subgraphs/cookbook/arweave.mdx index 16a14b7a0a13..02dd4f8398fc 100644 --- a/website/src/pages/de/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/de/subgraphs/cookbook/arweave.mdx @@ -92,9 +92,9 @@ Arweave data sources support two types of handlers: - `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. - +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. ## Schema-Definition @@ -103,7 +103,7 @@ Schema definition describes the structure of the resulting subgraph database and ## AssemblyScript-Mappings -Die Handler für die Ereignisverarbeitung sind in [AssemblyScript](https://www.assemblyscript.org/) geschrieben. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). 
@@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From e05fa0d2b85884912f8f8ad46b97a23386a1bab2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:02 -0500 Subject: [PATCH 0548/1534] New translations arweave.mdx (Italian) --- .../pages/it/subgraphs/cookbook/arweave.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/arweave.mdx b/website/src/pages/it/subgraphs/cookbook/arweave.mdx index 2098d5ab5932..2372025621d1 100644 --- a/website/src/pages/it/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/it/subgraphs/cookbook/arweave.mdx @@ -92,9 +92,9 @@ Arweave data sources support two types of handlers: - `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. - +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. 
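For illustration, a minimal transaction handler might look like the sketch below. The `ArweaveTransaction` entity and its import path are assumptions that depend on your `schema.graphql` and the `graph codegen` output; `Transaction` is the Arweave type from `graph-ts` shown in the class listing above.

```tsx
// Sketch only: the `ArweaveTransaction` entity and this import path are
// assumptions; the real names come from your schema.graphql and codegen.
import { ArweaveTransaction } from '../generated/schema'

export function handleTx(tx: Transaction): void {
  // tx.id arrives as Bytes; its hex form makes a stable entity id
  let entity = new ArweaveTransaction(tx.id.toHexString())
  entity.owner = tx.owner // the owner's public key, as Bytes
  entity.save()
}
```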
## Schema Definition @@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From a6445d7cbd1b65c1eb6aec031081ec57f1bf07da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:03 -0500 Subject: [PATCH 0549/1534] New translations arweave.mdx (Japanese) --- .../pages/ja/subgraphs/cookbook/arweave.mdx | 120 +++++++++--------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/arweave.mdx b/website/src/pages/ja/subgraphs/cookbook/arweave.mdx index 66a49d865363..b834f96b5cb9 100644 --- a/website/src/pages/ja/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/arweave.mdx @@ -13,36 +13,36 @@ Arweave プロトコルは、開発者がデータを永久に保存すること Arweaveは既に、さまざまなプログラミング言語でプロトコルを統合するための多数のライブラリを構築しています。詳細については、次を確認できます。 - [Arwiki](https://arwiki.wiki/#/en/main) -- [Arweaveリソース](https://www.arweave.org/build) +- [Arweave Resources](https://www.arweave.org/build) ## Arweaveサブグラフとは? -グラフを使用すると、「サブグラフ」と呼ばれるカスタムのオープン API を構築できます。サブグラフは、を使用していつでもクエリできるようにするために、インデクサー (サーバー オペレーター) に、ブロックチェーンでインデックスを作成してサーバーに保存するデータを伝えるために使用されます。 [GraphQL](https://graphql.org/)。 +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) は、Arweave プロトコルでデータをインデックス化できるようになりました。現在の統合は、Arweave をブロックチェーン (ブロックとトランザクション) としてインデックス付けするだけで、保存されたファイルにはまだインデックス付けしていません。 +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. ## Arweave サブグラフの作成 Arweaveのサブグラフを構築し展開できるようにするためには、2つのパッケージが必要です。 -1. `@graphprotocol/graph-cli` version 0.30.2以降 - サブグラフの構築と展開を行うコマンドラインツールです。[ここをクリック](https://www.npmjs.com/package/@graphprotocol/graph-cli)し、`npm`を使用してダウンロードしてください。 -2. `@graphprotocol/graph-ts` version 0.27.0以降 - サブグラフに特化した型のライブラリです。[こちらをクリック](https://www.npmjs.com/package/@graphprotocol/graph-ts)して、`npm`でダウンロードしてください。 +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## サブグラフのコンポーネント サブグラフには3つの構成要素があります: -### 1. マニフェスト - `subgraph.yaml` +### 1. Manifest - `subgraph.yaml` 対象のデータ ソースとその処理方法を定義します。 Arweave は新しい種類のデータ ソースです。 -### 2. 
スキーマ - `schema.graphql` +### 2. Schema - `schema.graphql` ここでは、GraphQL を使用してサブグラフにインデックスを付けた後にクエリできるようにするデータを定義します。これは実際には API のモデルに似ており、モデルはリクエスト本文の構造を定義します。 -Arweaveサブグラフの要件は、[existing documentation](/developing/creating-a-subgraph/#the-graphql-schema)に網羅されています。 +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. アセンブリスクリプトマッピング - `mapping.ts` +### 3. AssemblyScript Mappings - `mapping.ts` これは、リスニングしているデータソースと誰かがやりとりするときに、データをどのように取得し、保存するかを決定するロジックです。データは変換され、あなたがリストアップしたスキーマに基づいて保存されます。 @@ -53,9 +53,9 @@ $ graph codegen # マニフェストで識別されたようにファイルか $ グラフ ビルド # AssemblyScript ファイルから Web アセンブリを生成し、/build フォルダにすべてのサブグラフ ファイルを準備します ``` -## サブグラフ マニフェスト定義 +## サブグラフマニフェストの定義 -サブグラフ マニフェスト `subgraph.yaml` は、サブグラフのデータ ソース、関心のあるトリガー、およびこれらのトリガーに応答して実行される関数を識別します。Arweaveサブグラフのサブグラフ マニフェストの例については、以下を参照してください: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersion: 0.0.5 @@ -82,28 +82,28 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave サブグラフは新しい種類のデータ ソースを導入します (`arweave`) +- Arweave subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave データ ソースには、オプションの source.owner フィールドが導入されています。これは、Arweave ウォレットの公開鍵です。 Arweaveデータソースは 2 種類のハンドラーをサポートしています: -- `blockHandlers` - すべての新しいArweaveブロックに対して実行されます。source.ownerは必要ありません。 -- `transactionHandlers` - データ ソースの `source.owner` が所有者であるすべてのトランザクションで実行されます。現在、`transactionHandlers` には所有者が必要です。ユーザーがすべてのトランザクションを処理したい場合は、`source.owner` として "" を指定する必要があります。 +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > Source.owner は、所有者のアドレスまたは公開鍵にすることができます。 - +> > トランザクションはArweave permawebの構成要素であり、エンドユーザーによって作成されるオブジェクトです。 - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. ## スキーマ定義 -スキーマの定義は、結果として得られるサブグラフ・データベースの構造と、エンティティ間の関係を記述する。これは、元のデータソースに依存しません。スキーマ定義の詳細は、[ こちら](/developing/creating-a-subgraph/#the-graphql-schema)にあります。 +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript マッピング -イベントを処理するハンドラは、[AssemblyScript](https://www.assemblyscript.org/) で記述されています。 +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -146,9 +146,9 @@ class Transaction { } ``` -ブロックハンドラは`Block`を受け取り、トランザクションは`Transaction`を受け取ります。 +Block handlers receive a `Block`, while transactions receive a `Transaction`. 
-Arweave サブグラフのマッピングの記述は、Ethereum サブグラフのマッピングの記述と非常に似ています。詳細については、[こちら](/developing/creating-a-subgraph/#writing-mappings)をクリックしてください。 +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). ## Deploying an Arweave Subgraph in Subgraph Studio @@ -160,15 +160,15 @@ graph deploy --access-token ## Arweaveサブグラフのクエリ -ArweaveサブグラフのGraphQLエンドポイントは、スキーマ定義によって決定され、既存のAPIインタフェースが使用されます。詳細は[GraphQL API documentation](/subgraphs/querying/graphql-api/)を参照してください。 +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## サブグラフの例 参考までにサブグラフの例を紹介します: -- [Arweaveのサブグラフの例](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## よくある質問 +## FAQ ### サブグラフは Arweave やその他のチェーンにインデックスを付けることができますか? @@ -188,52 +188,52 @@ Source.ownerには、ユーザの公開鍵またはアカウントアドレス ### 現在の暗号化フォーマットは? -通常、データはバイトとしてマッピングに渡され、直接格納されている場合はサブグラフに `hex` 形式で返されます (例: ブロックおよびトランザクション ハッシュ). あなたはAに変換したいかもしれません `base64` or `base64 URL` 私たちのマッピングでの安全なフォーマットを日本語に翻訳すると、ブロックエクスプローラーなどで表示されるものに一致するようになります[Arweave Explorer](https://viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). -以下の`bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` ヘルパー関数が使用可能で、`graph-ts`に追加される予定です。 +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From 0d644ffd56dd25e8b3632dea28305de3f51eb94e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:04 -0500 Subject: [PATCH 0550/1534] New translations arweave.mdx (Korean) --- .../pages/ko/subgraphs/cookbook/arweave.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/arweave.mdx b/website/src/pages/ko/subgraphs/cookbook/arweave.mdx index 2098d5ab5932..2372025621d1 100644 --- a/website/src/pages/ko/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/arweave.mdx @@ -92,9 +92,9 @@ Arweave data sources support two types of handlers: - `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. - +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. 
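As a counterpart to a block handler, a transaction handler under the rules above could look like the sketch below. `id` and `owner` are assumed to be `Bytes` fields on the generated `arweave.Transaction` type (consistent with the transaction model described here), and `TransactionEntity` is a hypothetical schema entity.

```typescript
// Sketch only: tx.id and tx.owner are assumed Bytes fields on
// arweave.Transaction; TransactionEntity is a hypothetical entity.
import { arweave } from '@graphprotocol/graph-ts'
import { TransactionEntity } from '../generated/schema'

export function handleTx(tx: arweave.Transaction): void {
  let entity = new TransactionEntity(tx.id.toHexString())
  // source.owner in subgraph.yaml already restricts which owners reach this
  // handler, but the owner key is still available on each transaction
  entity.owner = tx.owner
  entity.save()
}
```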
## Schema Definition @@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From df781fb08fe873497d6f66489a9c5ca8e5d99b79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:05 -0500 Subject: [PATCH 0551/1534] New translations arweave.mdx (Dutch) --- .../pages/nl/subgraphs/cookbook/arweave.mdx | 98 +++++++++---------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/arweave.mdx b/website/src/pages/nl/subgraphs/cookbook/arweave.mdx index 0d8a71a7131c..1ff7fdd460fc 100644 --- a/website/src/pages/nl/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/arweave.mdx @@ -13,20 +13,20 @@ Het Arweave protocol stelt ontwikkelaars in staat om gegevens permanent op te sl Arweave heeft al talloze bibliotheken gebouwd voor het integreren van het protocol in verschillende programmeertalen. Voor meer informatie kun je kijken op: - [Arwiki](https://arwiki.wiki/#/en/main) -- [Arweave Bronnen](https://www.arweave.org/build) +- [Arweave Resources](https://www.arweave.org/build) ## Wat zijn Arweave Subgraphs? -The Graph stelt je in staat om aangepaste open API's genaamd "Subgraphs" te bouwen. Subgraphs worden gebruikt om indexers (serveroperators) te vertellen welke gegevens ze moeten indexeren op een blockchain en op hun servers moeten opslaan, zodat je deze op elk gewenst moment kunt opvragen met behulp van [GraphQL](https://graphql.org/). +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) kan nu gegevens indexeren op het Arweave protocol. De huidige integratie indexeert alleen Arweave als een blockchain (blokken en transacties), het indexeert nog niet de opgeslagen bestanden. +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. ## Bouwen van een Arweave Subgraph Voor het kunnen bouwen en implementeren van Arweave Subgraphs, heb je twee paketten nodig: -1. `@graphprotocol/graph-cli` hierboven versie 0.30.2 - dit is een command-line tool voor het bouwen en implementeren van Subgraphs. [Klik hier](https://www.npmjs.com/package/@graphprotocol/graph-cli) voor het downloaden door middel van `npm`. -2. `@graphprotovol/graph-ts` hierboven versie 0.27.0 - Dit is een bibliotheek van subgeraph-specifieke types. [Klik hier](https://www.npmjs.com/package/@graphprotocol/graph-ts) voor het downloaden door middel van `npm`. +1. 
`@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## Subgraph's componenten @@ -40,9 +40,9 @@ Definieert gegevensbronnen die van belang zijn en hoe deze verwerkt moeten worde Hier definieer je welke gegevens je wilt kunnen opvragen na het indexeren van je subgraph door het gebruik van GraphQL. Dit lijkt eigenlijk op een model voor een API, waarbij het model de structuur van een verzoek definieert. -De benodigdheden voor Arweave subgraphs zijn gedekt door de [bestaande documentatie](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. AssemblyScript Toewijzingen - `mapping.ts` +### 3. AssemblyScript Mappings - `mapping.ts` Dit is de logica die definieert hoe data zou moeten worden opgevraagd en opgeslagen wanneer iemand met de gegevens communiceert waarnaar jij aan het luisteren bent. De gegevens worden vertaald en is opgeslagen gebaseerd op het schema die je genoteerd hebt. @@ -53,9 +53,9 @@ $ graph codegen # genereert types van het schema bestand die geïdentificeerd is $ graph build # genereert Web Assembly vanuit de AssemblyScript-bestanden, en bereidt alle Subgraph-bestanden voor in een /build map ``` -## Subgraph Manifest Definitie +## Subgraph Manifest Definition -De subgraph manifest `subgraph.yaml` identificeert de gegevens bronnen voor de subgraph, de trekkers van interesse, en de functies die moeten worden uitgevoerd als antwoord op die trekkers. Zie hieronder voor een voorbeeld subgraph manifest voor een Arweave Subgraph: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersie: 0.0.5 @@ -82,19 +82,19 @@ dataSources: - afhandelaar: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduceert een nieuw type data bron (`arweave`) +- Arweave subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave data bronnen introduceert een optionele bron.eigenaar veld, dat de openbare sleutel is van een Arweave wallet Arweave data bronnen ondersteunt twee typen verwerkers: -- `blockHandlers` - Draait op elk nieuw Arweave block. Geen enkele bron.eigenaar is vereist. -- `transactieVerwerker` - Draait op elke transactie waarbij de data brons `bron.eigenaar` de eigenaar is. Momenteel is een eigenaar vereist voor `transactieVerwerker`, als gebruikers alle transacties willen verwerken moeten zij aanbieden als de `bron.eigenaar` +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. 
- +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. ## Schema Definition @@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From c9991d5f5526835d0ab19146411c986065e0692f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:07 -0500 Subject: [PATCH 0552/1534] New translations arweave.mdx (Polish) --- .../pages/pl/subgraphs/cookbook/arweave.mdx | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/arweave.mdx b/website/src/pages/pl/subgraphs/cookbook/arweave.mdx index 2098d5ab5932..2372025621d1 100644 --- a/website/src/pages/pl/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/arweave.mdx @@ -92,9 +92,9 @@ Arweave data sources support two types of handlers: - `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. - +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. 
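Because a `source.owner` of `""` makes the handler run for every transaction, any narrower filtering has to happen inside the mapping itself. A hedged sketch of that pattern follows; the owner constant is a placeholder, not a real key, and `owner` is assumed to be a `Bytes` field on `arweave.Transaction`.

```typescript
// Sketch only: with source.owner set to "" in subgraph.yaml, every
// transaction reaches the handler, so the mapping filters by owner itself.
import { arweave } from '@graphprotocol/graph-ts'

// Placeholder hex string, not a real wallet key.
const WATCHED_OWNER = '0xabc123'

export function handleTx(tx: arweave.Transaction): void {
  if (tx.owner.toHexString() != WATCHED_OWNER) {
    return // ignore transactions from other wallets
  }
  // ... index only the watched wallet's transactions here
}
```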
## Schema Definition @@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From a4f0f731b48b6943c97e7707a3b263d3d2c4e6f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:08 -0500 Subject: [PATCH 0553/1534] New translations arweave.mdx (Portuguese) --- .../pages/pt/subgraphs/cookbook/arweave.mdx | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/arweave.mdx b/website/src/pages/pt/subgraphs/cookbook/arweave.mdx index 8853a7adee4b..a84800d73d48 100644 --- a/website/src/pages/pt/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Construindo Subgraphs no Arweave --- -> O apoio ao Arweave no Graph Node e no Subgraph Studio está em beta: por favor nos contacte no [Discord](https://discord.gg/graphprotocol) se tiver dúvidas sobre a construção de subgraphs no Arweave! +> O apoio ao Arweave no Graph Node, e no Subgraph Studio, está em beta: por favor nos contacte no [Discord](https://discord.gg/graphprotocol) se tiver dúvidas sobre como construir subgraphs no Arweave! Neste guia, você aprenderá como construir e lançar Subgraphs para indexar a blockchain Arweave. @@ -13,20 +13,20 @@ O protocolo Arweave permite que programadores armazenem dados permanentemente. E O Arweave já construiu várias bibliotecas para integrar o protocolo num número de línguas de programação diferentes. Para mais informações, pode-se conferir: - [Arwiki](https://arwiki.wiki/#/en/main) -- [Arweave Resources](https://www.arweave.org/build) +- [Recursos do Arweave](https://www.arweave.org/build) ## O que são Subgraphs no Arweave? -The Graph lhe permite construir APIs abertas e customizadas chamadas "Subgraphs". Subgraphs são usados para contar aos indexadores (operadores de servidor) quais dados devem ser indexados em uma blockchain e salvar em seus servidores para que você possa consultá-la a qualquer hora usando o [GraphQL](https://graphql.org/). +The Graph permite a construção de APIs abertas e personalizadas chamadas "Subgraphs", que servem para contar aos indexadores (operadores de servidor) quais dados devem ser indexados em uma blockchain e guardados nos seus servidores para serem consultados a qualquer hora em queries pelo [GraphQL](https://graphql.org/). -O [Graph Node](https://github.com/graphprotocol/graph-node) é atualmente capaz de indexar dados no protocolo Arweave. A integração atual indexa apenas o Arweave como uma blockchain (blocos e transações), mas não indexa os arquivos armazenados no momento. +O [Graph Node](https://github.com/graphprotocol/graph-node) é atualmente capaz de indexar dados no protocolo Arweave. 
A integração atual indexa apenas o Arweave como uma blockchain (blocos e transações), mas no momento, não indexa os arquivos armazenados. ## Construindo um Subgraph no Arweave Para construir e lançar Subgraphs no Arweave, são necessários dois pacotes: -1. `@graphprotocol/graph-cli` acima da versão 0.30.2 — Esta é uma ferramenta de linha de comandos para a construção e lançamento de subgraphs. [Clique aqui](https://www.npmjs.com/package/@graphprotocol/graph-cli) para baixá-la usando o `npm`. -2. `@graphprotocol/graph-ts` acima da versão 0.07.2 — Esta é uma biblioteca de tipos específicos a subgraphs. [Clique aqui](https://www.npmjs.com/package/@graphprotocol/graph-ts) para baixar usando o `npm`. +1. `@graphprotocol/graph-cli` acima da versão 0.30.2 — Esta é uma ferramenta de linha de comandos para a construção e implantação de subgraphs. [Clique aqui](https://www.npmjs.com/package/@graphprotocol/graph-cli) para baixá-la usando o `npm`. +2. `@graphprotocol/graph-ts` acima da versão 0.27.0 — Esta é uma ferramenta de linha de comandos para a construção e implantação de subgraphs. [Clique aqui](https://www.npmjs.com/package/@graphprotocol/graph-ts) para baixá-la usando o `npm`. ## Os componentes de um subgraph @@ -40,7 +40,7 @@ Define as fontes de dados de interesse, e como elas devem ser processadas. O Arw Aqui é possível definir quais dados queres consultar após indexar o seu subgraph utilizando o GraphQL. Isto é como um modelo para uma API, onde o modelo define a estrutura de um órgão de requisito. -Os requerimentos para subgraphs no Arweave são cobertos pela [documentação existente](/developing/creating-a-subgraph/#the-graphql-schema). +Os requisitos para subgraphs do Arweave estão cobertos pela [documentação](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. Mapeamentos de AssemblyScript - `mapping.ts` @@ -53,9 +53,9 @@ $ graph codegen # gera tipos do arquivo de schema identificado no manifest $ graph build # gera Web Assembly dos arquivos AssemblyScript, e prepara todos os arquivos do subgraph em uma pasta /build ``` -## Definição do Manifest do Subgraph +## Definição de Manifest de Subgraph -O manifest do subgraph `subgraph.yaml` identifica as fontes de dados ao subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a tais gatilhos. Veja abaixo um exemplo de um manifest de subgraph para um subgraph no Arweave: +O manifest do subgraph `subgraph.yaml` identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a tais gatilhos. Veja abaixo um exemplo de um manifest de subgraph, para um subgraph no Arweave: ```yaml specVersion: 0.0.5 @@ -84,28 +84,28 @@ dataSources: - Subgraphs no Arweave introduzem uma nova categoria de fonte de dados (`arweave`) - A rede deve corresponder a uma rede no Graph Node que a hospeda. No Subgraph Studio, a mainnet do Arweave é `arweave-mainnet` -- Fontes de dados no Arweave introduzem um campo `source.owner` opcional, a chave pública de uma carteira no Arweave +- Fontes de dados no Arweave introduzem um campo source.owner opcional, a chave pública de uma carteira no Arweave -Fontes de dados no Arweave apoiam duas categorias de _handlers_: +Fontes de dados no Arweave apoiam duas categorias de handlers: -- `blockHandlers` - Executar em cada bloco novo no Arweave. Nenhum `source.owner` é exigido. -- `transactionHandlers` — Executar em todas as transações onde o `source.owner` da fonte de dados é o dono. 
Atualmente, um dono é exigido para o `transactionHandlers`; caso utilizadores queiram processar todas as transações, eles devem providenciar "" como o `source.owner` - -> O `source.owner` pode ser o endereço do dono, ou sua Chave Pública. +- `blockHandlers` — Executar em cada bloco novo no Arweave. Nenhum `source.owner` necessário. +- `transactionHandlers` — Executar em todas as transações cujo dono é o source.owner da fonte de dados. Atualmente, é necessário ter um dono para o transactionHandlers; caso um utilizador queira processar todas as transações, ele deve providenciar "" como o `source.owner` +> O source.owner pode ser o endereço do dono, ou sua Chave Pública. +> > Transações são os blocos de construção da permaweb do Arweave, além de serem objetos criados para utilizadores finais. +> +> Nota: No momento, não há apoio para transações no [Irys (antigo Bundlr)](https://irys.xyz/). -> Nota: Transações no [Irys (antigo Bundlr)](https://bundlr.network/) não são apoiadas presentemente. - -## Definição de _Schema_ +## Definição de Schema -A definição de Schema descreve a estrutura do banco de dados resultado do subgraph, e os relacionamentos entre entidades. Isto é agnóstico da fonte de dados original. Há mais detalhes na definição de schema de subgraph [aqui](/developing/creating-a-subgraph/#the-graphql-schema). +A definição de Schema descreve a estrutura do banco de dados resultado do subgraph, e os relacionamentos entre entidades. Isto é agnóstico da fonte de dados original. Para mais detalhes na definição de schema de subgraph, [clique aqui](/developing/creating-a-subgraph/#the-graphql-schema). -## Mapeamentos de AssemblyScript +## Mapeamentos em AssemblyScript -Os _handlers_ para eventos de processamento são escritos em [AssemblyScript](https://www.assemblyscript.org/). +Os handlers para processamento de eventos estão escritos em [AssemblyScript](https://www.assemblyscript.org/). -Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +A indexação do Arweave introduz tipos de dados específicos para esse ecossistema à [API do AssemblyScript](/subgraphs/developing/creating/graph-ts/api/). ```tsx class Block { @@ -146,13 +146,13 @@ class Transaction { } ``` -_Handlers_ de bloco recebem um `Block`, enquanto transações recebem uma `Transaction`. +Handlers de bloco recebem um `Block`, enquanto transações recebem um `Transaction`. -Escrever os mapeamentos de um Subgraph no Arweave é muito similar à escrita dos mapeamentos de um Subgraph no Ethereum. Para mais informações, clique [aqui](/developing/creating-a-subgraph/#writing-mappings). +Escrever os mapeamentos de um Subgraph no Arweave é parecido com a escrita dos mapeamentos de um Subgraph no Ethereum. Para mais informações, clique [aqui](/developing/creating-a-subgraph/#writing-mappings). ## Como lançar um Subgraph no Arweave ao Subgraph Studio -Após criar o seu Subgraph no painel de controlo do Subraph Studio, você pode lançá-lo com o código de linha de comando `graph deploy`. +Após criar o seu Subgraph no painel de controlo do Subgraph Studio, este pode ser implantado com o comando `graph deploy`. 
```bash graph deploy --access-token @@ -166,9 +166,9 @@ O ponto final do GraphQL para subgraphs no Arweave é determinado pela definiç Aqui está um exemplo de subgraph para referência: -- [Exemplo de Subgraph para o Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Exemplo de subgraph para o Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## Perguntas Frequentes +## FAQ ### Um subgraph pode indexar o Arweave e outras chains? @@ -184,56 +184,56 @@ Isto não é apoiado no momento. ### Como posso filtrar transações para uma conta específica? -O `source.owner` pode ser a chave pública ou o endereço da conta do utilizador. +O source.owner pode ser a chave pública ou o endereço da conta do utilizador. ### Qual é o formato atual de encriptação? -Os dados são geralmente passados aos mapeamentos como Bytes, que se armazenados diretamente, são retornados ao subgraph em um formato `hex` (por ex. hashes de transações e blocos). Você pode querer convertê-lo a um formato seguro em `base64` ou `base64 URL` em seus mapeamentos, para combinar com o que é exibido em exploradores de blocos, como o [Arweave Explorer](https://viewblock.io/arweave/). +Os dados são geralmente passados aos mapeamentos como Bytes, que se armazenados diretamente, são retornados ao subgraph em um formato `hex` (por ex. hashes de transações e blocos). Você pode querer convertê-lo a um formato seguro para `base64` ou `base64 URL` em seus mapeamentos, para combinar com o que é exibido em exploradores de blocos, como o [Arweave Explorer](https://viewblock.io/arweave/). -A seguinte função `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` pode ser usada, e será adicionada ao `graph-ts`: +A seguinte função de helper `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` pode ser usada, e será adicionada ao `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From 64865d75f6c310b70c31e89b434e41ebc527ecf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:08 -0500 Subject: [PATCH 0554/1534] New translations arweave.mdx (Russian) --- .../pages/ru/subgraphs/cookbook/arweave.mdx | 122 +++++++++--------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/arweave.mdx b/website/src/pages/ru/subgraphs/cookbook/arweave.mdx index 186e6a501f5c..a7f24e1bf79e 100644 --- a/website/src/pages/ru/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Создание Субграфов на Arweave --- -> Поддержка Arweave в Graph Node и Subgraph Studio находится в стадии бета-тестирования: обращайтесь к нам в [Discord](https://discord.gg/graphprotocol), если у Вас возникнут вопросы о создании субграфов Arweave! +> Поддержка Arweave в Graph Node и Subgraph Studio находится на стадии бета-тестирования. Если у Вас есть вопросы о создании субграфов Arweave, свяжитесь с нами в [Discord](https://discord.gg/graphprotocol)! Из этого руководства Вы узнаете, как создавать и развертывать субграфы для индексации блокчейна Arweave. @@ -17,16 +17,16 @@ Arweave уже создала множество библиотек для ин ## Что такое субграфы Arweave? -The Graph позволяет Вам создавать пользовательские открытые API, называемые "Subgraphs". Субграфы используются для указания индексаторами (операторам серверов), какие данные индексировать в блокчейне и сохранять на их серверах, чтобы Вы могли запросить их в любое время, используя [GraphQL](https://graphql.org/). +The Graph позволяет создавать собственные открытые API, называемые "Субграфами". 
Субграфы используются для указания индексаторам (операторам серверов), какие данные индексировать на блокчейне и сохранять на их серверах, чтобы Вы могли запрашивать эти данные в любое время используя [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) теперь может индексировать данные по протоколу Arweave. Текущая интеграция только индексирует Arweave как блокчейн (блоки и транзакции), но пока еще не индексирует сохраненные файлы. +[Graph Node](https://github.com/graphprotocol/graph-node) теперь может индексировать данные на протоколе Arweave. Текущая интеграция индексирует только Arweave как блокчейн (блоки и транзакции), она еще не индексирует сохраненные файлы. ## Построение Субграфа на Arweave Чтобы иметь возможность создавать и развертывать Субграфы на Arweave, Вам понадобятся два пакета: -1. `@graphprotocol/graph-cli` выше версии 0.30.2 - это инструмент командной строки для построения и развертывания субграфов. [Нажмите здесь](https://www.npmjs.com/package/@graphprotocol/graph-cli), чтобы загрузить с помощью `npm`. -2. `@graphprotocol/graph-ts` выше версии 0.27.0 - это библиотека типов, специфичных для субграфов. [Нажмите здесь](https://www.npmjs.com/package/@graphprotocol/graph-ts), чтобы загрузить с помощью `npm`. +1. `@graphprotocol/graph-cli` версии выше 0.30.2 — это инструмент командной строки для создания и развертывания субграфов. [Нажмите здесь](https://www.npmjs.com/package/@graphprotocol/graph-cli), чтобы скачать с помощью `npm`. +2. `@graphprotocol/graph-ts` версии выше 0.27.0 — это библиотека типов, специфичных для субграфов. [Нажмите здесь](https://www.npmjs.com/package/@graphprotocol/graph-ts), чтобы скачать с помощью `npm`. ## Составляющие Субграфов @@ -40,9 +40,9 @@ The Graph позволяет Вам создавать пользователь Здесь Вы определяете, какие данные хотите иметь возможность запрашивать после индексации своего субграфа с помощью GraphQL. На самом деле это похоже на модель для API, где модель определяет структуру тела запроса. -Требования к субграфам Airweave описаны в [существующей документации](/developing/creating-a-subgraph/#the-graphql-schema). +Требования для субграфов Arweave описаны в [имеющейся документации](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. Мэппинги AssemblyScript - `mapping.ts` +### 3. Мэппинги на AssemblyScript - `mapping.ts` Это логика, которая определяет, как данные должны извлекаться и храниться, когда кто-то взаимодействует с источниками данных, которые Вы отслеживаете. Данные переводятся и сохраняются в соответствии с указанной Вами схемой. @@ -53,9 +53,9 @@ $ graph codegen # генерирует типы из файла схемы, ук $ graph build # генерирует Web Assembly из файлов AssemblyScript и подготавливает все файлы субграфа в папке /build ``` -## Определение манифеста Субграфа +## Определение манифеста субграфа -Манифест субграфа `subgraph.yaml` определяет источники данных для субграфа, представляющие интерес триггеры и функции, которые должны выполняться в ответ на эти триггеры. Ниже приведен пример манифеста субграфа для субграфа Arweave: +Манифест субграфа `subgraph.yaml` определяет источники данных для субграфа, триггеры, представляющие интерес, и функции, которые должны выполняться в ответ на эти триггеры. 
Ниже приведён пример манифеста субграфа для Arweave: ```yaml specVersion: 0.0.5 @@ -82,30 +82,30 @@ dataSources: - handler: handleTx # имя функции в файле мэппинга ``` -- Субграфы Arweave представляют новый вид источника данных (`arweave`) -- Сеть должна соответствовать сети на хостинге Graph Node. В Subgraph Studio основной сетью Arweave является `arweave-mainnet` +- Субграфы Arweave вводят новый тип источника данных (`arweave`) +- Сеть должна соответствовать сети на размещенной Graph Node. В Subgraph Studio мейннет Arweave обозначается как `arweave-mainnet` - Источники данных Arweave содержат необязательное поле source.owner, которое является открытым ключом кошелька Arweave Источники данных Arweave поддерживают два типа обработчиков: -- `blockHandlers` - запускаются на каждом новом блоке Arweave. Не требуется source.owner. -- `transactionHandlers` - запускаются при каждой транзакции, в которой источник данных `source.owner` является владельцем. В настоящее время для `transactionHandlers` требуется владелец, если пользователи хотят обрабатывать все транзакции, они должны указать "" в качестве `source.owner` +- `blockHandlers` — выполняется при каждом новом блоке Arweave. source.owner не требуется. +- `transactionHandlers` — выполняется при каждой транзакции, где `source.owner` является владельцем источника данных. На данный момент для `transactionHandlers` требуется указать владельца. Если пользователи хотят обрабатывать все транзакции, они должны указать `""` в качестве `source.owner` > Source.owner может быть адресом владельца или его Публичным ключом. - +> > Транзакции являются строительными блоками Arweave permaweb, и они представляют собой объекты, созданные конечными пользователями. - -> Примечание: Транзакции [Irys (ранее Bundlr)](https://irys.xyz/) пока не поддерживаются. +> +> Примечание: транзакции [Irys (ранее Bundlr)](https://irys.xyz/) пока не поддерживаются. ## Определение схемы -Определение схемы описывает структуру результирующей базы данных субграфа и взаимосвязи между объектами. Это не зависит от исходного источника данных. Более подробная информация об определении схемы субграфа приведена [здесь](/developing/creating-a-subgraph/#the-graphql-schema). +Определение схемы описывает структуру базы данных итогового субграфа и взаимосвязи между объектами. Это не зависит от исходного источника данных. Более подробную информацию об определении схемы субграфа можно найти [здесь](/developing/creating-a-subgraph/#the-graphql-schema). ## Мэппинги AssemblyScript -Обработчики событий написаны на [AssemblyScript](https://www.assemblyscript.org/). +Обработчики для обработки событий написаны на [AssemblyScript](https://www.assemblyscript.org/). -Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +Индексирование Arweave вводит специфичные для Arweave типы данных в [API AssemblyScript](https://thegraph. com/docs/using-graph-ts). ```tsx class Block { @@ -146,13 +146,13 @@ class Transaction { } ``` -Обработчики блоков получают `Block`, в то время как транзакции получают `Transaction`. +Обработчики блоков получают `Block`, в то время как обработчики транзакций получают `Transaction`. -Написание мэппингов субграфа Arweave очень похоже на написание мэппингов субграфа Ethereum. Для получения дополнительной информации нажмите [здесь](/developing/creating-a-subgraph/#writing-mappings). +Написание мэппингов для субграфа Arweave очень похоже на написание мэппингов для субграфа Ethereum. 
Для получения дополнительной информации нажмите [сюда](/developing/creating-a-subgraph/#writing-mappings). ## Развертывание субграфа Arweave в Subgraph Studio -После создания субграфа на панели управления Subgraph Studio его можно развернуть с помощью команды CLI `graph Deploy`. +Как только Ваш субграф будет создан на панели управления Subgraph Studio, Вы можете развернуть его с помощью команды CLI `graph deploy`. ```bash graph deploy --access-token @@ -160,7 +160,7 @@ graph deploy --access-token ## Запрос субграфа Arweave -Конечная точка GraphQL для субграфов Arweave устанавливается определением схемы с помощью существующего интерфейса API. Пожалуйста, посетите [документацию GraphQL API](/subgraphs/querying/graphql-api/) для получения дополнительной информации. +Конечная точка GraphQL для субграфов Arweave определяется схемой и существующим интерфейсом API. Для получения дополнительной информации ознакомьтесь с [документацией по API GraphQL](/subgraphs/querying/graphql-api/). ## Примеры субграфов @@ -168,7 +168,7 @@ graph deploy --access-token - [Пример субграфа для Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## Часто задаваемые вопросы +## FAQ ### Может ли субграф индексировать Arweave и другие чейны? @@ -188,52 +188,52 @@ Source.owner может быть открытым ключом пользова ### Каков текущий формат шифрования? -Данные обычно передаются в мэппинги в виде Байтов, которые при непосредственном сохранении возвращаются в субграф в формате `hex` (например, хэши блоков и транзакций). Возможно, Вы захотите преобразовать в безопасный формат `base64` или `base64 URL` в своих мэппингах, чтобы они соответствовали тому, что отображается в обозревателях блоков, таких, как [Arweave Explorer](https://viewblock.io/arweave/). +Данные обычно передаются в мэппингах в виде байтов (Bytes), которые, если хранятся напрямую, возвращаются в субграф в формате `hex` (например, хэши блоков и транзакций). Вы можете захотеть преобразовать их в формат `base64` или `base64 URL`-безопасный в Ваших мэппингах, чтобы они соответствовали тому, что отображается в блок-обозревателях, таких как [Arweave Explorer](https://viewblock.io/arweave/). 
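Once the `bytesToBase64` helper introduced just below is pasted into a mapping, converting an ID for explorer-style display is a one-liner. A sketch, assuming `tx.id` is a `Bytes` value — in `graph-ts`, `Bytes` extends `Uint8Array`, so it satisfies the helper's parameter type.

```typescript
// Sketch only: uses the bytesToBase64 helper defined below; assumes tx.id
// is Bytes (a Uint8Array subclass), matching the helper's signature.
export function handleTx(tx: arweave.Transaction): void {
  // urlSafe = true yields the base64url form shown by Arweave explorers
  let displayId = bytesToBase64(tx.id, true)
  // ... store displayId on an entity as needed
}
```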
-Можно использовать следующую вспомогательную функцию `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string`, которая будет добавлена в `graph-ts`: +Следующая вспомогательная функция `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` может быть использована и будет добавлена в `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From c1375207a254f92cef94f26c21f4208b16844726 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:09 -0500 Subject: [PATCH 0555/1534] New translations arweave.mdx (Swedish) --- .../pages/sv/subgraphs/cookbook/arweave.mdx | 112 +++++++++--------- 1 file changed, 56 insertions(+), 56 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/arweave.mdx b/website/src/pages/sv/subgraphs/cookbook/arweave.mdx index 019409ea12e6..8a78a4ffa184 100644 --- a/website/src/pages/sv/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/arweave.mdx @@ -17,22 +17,22 @@ Arweave har redan byggt ett flertal bibliotek för att integrera protokollet i e ## Vad är Arweave-subgrafer? -Grafen låter dig bygga anpassade öppna API:er som kallas "Subgraphs". Subgrafer används för att tala om för indexerare (serveroperatörer) vilka data som ska indexeras på en blockchain och sparas på deras servrar så att du när som helst ska kunna fråga dem med [ GraphQL ](https://graphql.org/). +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) Kan nu indexera data på Arweave-protokollet. Den nuvarande integrationen indexerar bara Arweave som en blockchain (block och transaktioner), den indexerar inte de lagrade filerna ännu. +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. ## Bygga en Arweave-subgraf För att kunna bygga och distribuera Arweave Subgraphs behöver du två paket: -1. `@graphprotocol/graph-cli` ovan version 0.30.2 - Detta är ett kommandoradsverktyg för att bygga och distribuera subgrafer. [ Klicka här ](https://www.npmjs.com/package/@graphprotocol/graph-cli) för att ladda ner med `npm`. -2. `@graphprotocol/graph-ts` ovan version 0.27.0 - Detta är ett bibliotek med subgrafspecifika typer. [Klicka här](https://www.npmjs.com/package/@graphprotocol/graph-ts) för att ladda ner med `npm`. +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. 
## Subgraphs komponenter Det finns tre komponenter i en subgraf: -### 1. Manifestera - `subgraph.yaml` +### 1. Manifest - `subgraph.yaml` Definierar datakällorna av intresse och hur de ska behandlas. Arweave är en ny typ av datakälla. @@ -40,9 +40,9 @@ Definierar datakällorna av intresse och hur de ska behandlas. Arweave är en ny Här definierar du vilken data du vill kunna fråga efter att du har indexerat din subgrafer med GraphQL. Detta liknar faktiskt en modell för ett API, där modellen definierar strukturen för en begäran. -Kraven för Arweave subgrafer täcks av den[ befintliga dokumentationen ](/developing/creating-a-subgraph/#the-graphql-schema). +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. AssemblyScript mappningar - `mapping.ts` +### 3. AssemblyScript Mappings - `mapping.ts` Detta är logiken som avgör hur data ska hämtas och lagras när någon interagerar med datakällorna du lyssnar på. Data översätts och lagras utifrån det schema du har listat. @@ -55,7 +55,7 @@ $ graph build # generates Web Assembly from the AssemblyScript files, and prepar ## Definition av subgraf manifestet -Subgrafmanifestet `subgraph.yaml` identifierar datakällorna för subgrafen, utlösare av intresse och funktionerna som ska köras som svar på dessa utlösare. Se nedan för ett exempel på subgraf manifest för en Arweave-subgraf: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersion: 0.0.5 @@ -82,28 +82,28 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgrafer introducerar en ny typ av datakälla (`arweave`) +- Arweave subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave datakällor introducerar ett valfritt source.owner fält, som är den publika nyckeln till en Arweave plånbok Arweave datakällor stöder två typer av hanterare: -- `blockHandlers` - Kör på varje nytt Arweave block. Ingen source.owner krävs. -- `transactionHandlers` - Kör på varje transaktion där datakällans `source.owner` är ägare. För närvarande krävs en ägare för `transactionHandlers`, om användare vill bearbeta alla transaktioner ska de ange "" som `source.owner` +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > De source.owner kan vara ägarens adress eller deras publika nyckel. - +> > Transaktioner är byggstenarna i Arweave permaweb och de är objekt skapade av slutanvändare. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. ## Schema Definition -Schema definition beskriver strukturen för den resulterande subgraf databasen och relationerna mellan enheter. Detta är agnostiskt för den ursprungliga datakällan. Det finns mer information om definitionen av subgraf schema [här](/developing/creating-a-subgraph/#the-graphql-schema). 
+Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript mappningar -Hanterarna för bearbetning av händelser är skrivna i [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -146,9 +146,9 @@ class Transaction { } ``` -Blockhanterare får ett `Block`, medan transaktioner får en `Transaktion`. +Block handlers receive a `Block`, while transactions receive a `Transaction`. -Att skriva mappningar av en Arweave subgrafer är mycket lik att skriva mappningar av en Ethereum subgrafer. För mer information, klicka [här](/developing/creating-a-subgraph/#writing-mappings). +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). ## Deploying an Arweave Subgraph in Subgraph Studio @@ -160,13 +160,13 @@ graph deploy --access-token ## Fråga efter en Arweave-subgraf -GraphQL slutpunkten för Arweave subgrafer bestäms av schemadefinitionen, med det befintliga API gränssnittet. Besök [GraphQL API dokumentationen](/subgraphs/querying/graphql-api/) för mer information. +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Exempel på subgrafer Här är ett exempel på subgraf som referens: -- [Exempel på subgraf för Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ @@ -188,52 +188,52 @@ Source.owner kan vara användarens publika nyckel eller kontoadress. ### Vad är det aktuella krypteringsformatet? -Data skickas i allmänhet till mappningarna som bytes, som om de lagras direkt returneras i subgrafen i ett `hex`-format (ex. block- och transaktionshashar). Du kanske vill konvertera till ett `base64` eller `base64 URL`-säkert format i dina mappningar, för att matcha det som visas i blockutforskare som [Arweave Explorer](https: //viewblock.io/arweave/). +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
-Följande `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` hjälpfunktion kan användas och kommer att läggas till i `graph-ts`: +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From ca373c7f9c67e5c61359b2b21cab6705f608e6d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:11 -0500 Subject: [PATCH 0556/1534] New translations arweave.mdx (Turkish) --- .../pages/tr/subgraphs/cookbook/arweave.mdx | 126 +++++++++--------- 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/arweave.mdx b/website/src/pages/tr/subgraphs/cookbook/arweave.mdx index bf71b1120b55..8495fd68c39b 100644 --- a/website/src/pages/tr/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Arweave Üzerinde Subgraphlar Oluşturma --- -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Graph Düğümü ve Subgraph Studio'daki Arweave desteği beta aşamasındadır: Arweave subgraph'ları oluşturma konusunda herhangi bir sorunuz varsa lütfen [Discord](https://discord.gg/graphprotocol) üzerinden bizimle iletişime geçin! Bu rehberde, Arweave blok zincirini indekslemek için nasıl Subgraphs oluşturacağınızı ve dağıtacağınızı öğreneceksiniz. @@ -17,22 +17,22 @@ Arweave, protokolü farklı programlama dillerine entegre etmek için halihazır ## Arweave Subgraphları Nedir? -Graph, "Subgraphs" adı verilen size özel açık API'lar oluşturmanıza olanak tanır. Subgraphlar, indeksleyicilere (sunucu operatörleri) bir blok zincirinde hangi verileri indekslemeleri gerektiğini ve daha sonra istedikleri zaman [GraphQL](https://graphql.org/) kullanarak bu verileri sorgulayabilmeleri adına verileri sunucularında kaydetmeleri gerektiğini söylemek için kullanılır. +The Graph, "Subgraph" adı verilen özel açık API'ler oluşturmanıza olanak tanır. Subgraph'ler, endeksleyicilere (sunucu operatörleri) bir blokzincirinde hangi verilerin endeksleneceğini ve sunucularında saklanacağını belirtmek için kullanılır. Böylece [GraphQL](https://graphql.org/) kullanarak bu verilere istediğiniz zaman sorgu yapabilirsiniz. -[Graph Düğümü](https://github.com/graphprotocol/graph-node) artık Arweave protokolündeki verileri indeksleyebiliyor. Mevcut entegrasyon Arweave'i yalnızca bir blok zinciri (bloklar ve işlemler) olarak indeksliyor, ancak henüz depolanan dosyaları indekslemiyor. +[Graph Düğümü](https://github.com/graphprotocol/graph-node) artık Arweave protokolündeki verileri endeksleyebiliyor. Mevcut entegrasyon yalnızca Arweave'i bir blokzinciri olarak (bloklar ve işlemler) endekslemekte olup, henüz depolanan dosyaları endekslememektedir. ## Bir Arweave Subgraph'ı Oluşturma Arweave Subgraphları oluşturabilmek ve dağıtabilmek için iki pakete ihtiyacınız vardır: -1. 
`@graphprotocol/graph-cli` 'nin 0.30.2 sürümünün üstü - Bu, subgraphları oluşturmak ve dağıtmak için kullanılan bir komut satırı aracıdır. `npm` kullanarak indirmek için [buraya tıklayın](https://www.npmjs.com/package/@graphprotocol/graph-cli). -2. `@graphprotocol/graph-ts`'in 0.27.0 sürümünün üstü - Bu, subgraph'a özgü tiplerin bulunduğu bir kütüphanedir. `npm` kullanarak indirmek için [buraya tıklayın](https://www.npmjs.com/package/@graphprotocol/graph-ts). +1. `@graphprotocol/graph-cli` 0.30.2 sürümünün üzerinde - Bu, subgraph'ler oluşturmak ve dağıtmak için kullanılan bir komut satırı aracıdır. [Buraya] (https://www.npmjs.com/package/@graphprotocol/graph-cli) tıklayarak npm kullanarak indirebilirsiniz. +2. `@graphprotocol/graph-ts` 0.27.0 sürümünün üzerinde - Bu, subgraph'e özgü türler içeren bir kütüphanedir. [Buraya] (https://www.npmjs.com/package/@graphprotocol/graph-ts) tıklayarak npm kullanarak indirebilirsiniz. ## Subgraph'ın bileşenleri Bir subgraph'ın üç bileşeni vardır: -### 1. Manifest - `subgraph.yaml` +### 1. Manifesto - `subgraph.yaml` İlgilenilen veri kaynaklarını ve bunların nasıl işlenmesi gerektiğini tanımlar. Arweave yeni bir veri kaynağı türüdür. @@ -40,9 +40,9 @@ Bir subgraph'ın üç bileşeni vardır: Burada, GraphQL kullanarak Subgraph'ınızı indeksledikten sonra hangi verileri sorgulayabilmek istediğinizi tanımlarsınız. Bu aslında, modelin bir istek gövdesinin yapısını tanımladığı bir API modeline benzer. -Arweave subgraphları için gereksinimler [mevcut dokümantasyonda](/developing/creating-a-subgraph/#the-graphql-schema) ele alınmıştır. +Arweave subgraph'leri için gereksinimler [mevcut dokümanlarda](/developing/creating-a-subgraph/#the-graphql-schema) ele alınmıştır. -### 3. AssemblyScript Eşleştirmeleri - `mapping.ts` +### 3. AssemblyScript Eşlemeleri - `mapping.ts` Bu, birisi sizin etkinliklerini gözlemlediğiniz veri kaynaklarıyla etkileşimde bulunduğunda verinin nasıl alınması ve depolanması gerektiğini belirleyen mantıktır. Veri çevrilir ve belirttiğiniz şemaya göre depolanır. @@ -55,7 +55,7 @@ $ graph build # AssemblyScript dosyalarından Web Assembly oluşturur ve tüm su ## Subgraph Manifest Tanımı -Subgraph manifesti `subgraph.yaml`, subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Bir Arweave subgraph'ı özelinde örnek bir subgraph manifesti için aşağıya bakınız: +Subgraph manifestosu subgraph.yaml; subgraph'in veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Aşağıda, bir Arweave subgraph'i için örnek bir subgraph manifestosu bulunmaktadır: ```yaml specVersion: 0.0.5 @@ -82,30 +82,30 @@ dataSources: - handler: handleTx # eşleştirme dosyasındaki fonksiyon adı ``` -- Arweave subgraphları yeni bir veri kaynağı türünü tanıtır (`arweave`) -- The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` +- Arweave subgraph'leri, yeni bir veri kaynağı türü (`arweave`) sunar +- Ağ, sağlayıcı Graph Düğümü üzerindeki bir ağa karşılık gelmelidir. Subgraph Studio'da, Arweave'in ana ağı `arweave-mainnet` olarak tanımlanır - Arweave veri kaynakları, bir Arweave cüzdanının genel anahtarı olan opsiyonel bir source.owner alanı sunar Arweave veri kaynakları iki tür işleyiciyi destekler: -- `blockHandlers` - Her yeni Arweave bloğunda çalıştırılır. source.owner gerekli değildir. 
-- `transactionHandlers` - Veri kaynağının `source.owner`'ının sahibi olduğu her işlemde çalıştırılır. Şu anda `transactionHandlers` için bir sahip gereklidir. Kullanıcılar tüm işlemleri işlemek istiyorlarsa `source.owner` olarak "" sağlamalıdırlar +- `blockHandlers` - Her yeni Arweave blokunda çalıştırılır. source.owner belirtilmesi gerekmez. +- `transactionHandlers` - Veri kaynağının sahibinin source.owner olduğu her işlemde çalıştırılır. Şu anda ` transactionHandlers` için bir sahip (owner) gereklidir. Kullanıcılar tüm işlemleri gerçekleştirmek istiyorlarsa `source.owner` olarak boş dize "" sağlamalıdırlar > source.owner, sahibin adresi veya Genel Anahtarı olabilir. - +> > İşlemler Arweave permaweb'in yapı taşlarıdır ve son kullanıcılar tarafından oluşturulan nesnelerdir. - -> Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. +> +> Not: [Irys (önceden Bundlr)](https://irys.xyz/) işlemleri henüz desteklenmemektedir. ## Şema Tanımı -Şema tanımı, ortaya çıkan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri tanımlar. Bu, orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla ayrıntı [burada](/developing/creating-a-subgraph/#the-graphql-schema) bulunmaktadır. +Şema tanımı, oluşan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri tanımlar. Bu ilişki orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla detay [burada](/developing/creating-a-subgraph/#the-graphql-schema) bulunmaktadır. ## AssemblyScript Eşlemeleri -Olayları işlemek için işleyiciler [AssemblyScript](https://www.assemblyscript.org/) içinde yazılmıştır. +Olayları işlemek için kullanılan işleyiciler [AssemblyScript](https://www.assemblyscript.org/) ile yazılmıştır. -Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +Arweave endeksleme, [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) için Arweave'e özgü veri türlerini tanıtır. ```tsx class Block { @@ -146,21 +146,21 @@ class Transaction { } ``` -Blok işleyicileri bir `Block` alırken, işlemler bir `Transaction` alır. +Blok işleyiciler bir `Block` alırken, işlemler bir `Transaction` alır. -Bir Arweave Subgraph'ının eşleştirmelerini yazmak, bir Ethereum Subgraph'ının eşleştirmelerini yazmaya çok benzerdir. Daha fazla bilgi için [buraya](/developing/creating-a-subgraph/#writing-mappings) tıklayın. +Arweave Subgraph'inin eşleştirmelerini yazmak, bir Ethereum Subgraph'inin eşleştirmelerini yazmaya oldukça benzerdir. Daha fazla bilgi için [buraya](/developing/creating-a-subgraph/#writing-mappings) tıklayın. -## Deploying an Arweave Subgraph in Subgraph Studio +## Subgraph Studio'da Arweave Subgraph'i Dağıtma -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. +Subgraph Studio panelinizde subgraph'iniz oluşturulduktan sonra onu `graph deploy` CLI komutunu kullanarak dağıtabilirsiniz. ```bash -graph deploy --access-token +graph deploy --access-token ``` ## Arweave Subgraph'ını Sorgulama -Arweave subgraphları için GraphQL uç noktası, mevcut API arayüzü ile şema tanımı tarafından belirlenir. Daha fazla bilgi için lütfen [GraphQL API dökümantasyonunu](/subgraphs/querying/graphql-api/) ziyaret edin. +Arweave subgraph'leri için GraphQL endpoint'i, mevcut API arayüzüyle şema tanımına göre belirlenir. Daha fazla bilgi için [GraphQL API dokümantasyonuna](/subgraphs/querying/graphql-api/) göz atın. 
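As an illustration of what such a query can look like — note that the `transactions` entity and its fields here are assumptions; the queryable names come entirely from your own `schema.graphql`, not from a built-in API:

```graphql
# Illustrative only — entity and field names must match your schema.graphql.
{
  transactions(first: 5) {
    id
    owner
  }
}
```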
## Örnek Subgraph'ler @@ -168,7 +168,7 @@ Arweave subgraphları için GraphQL uç noktası, mevcut API arayüzü ile şema - [Arweave için örnek subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## SSS +## FAQ ### Bir subgraph Arweave ve diğer zincirleri indeksleyebilir mi? @@ -188,52 +188,52 @@ source.owner kullanıcının genel anahtarı veya hesap adresi olabilir. ### Mevcut şifreleme formatı nedir? -Veri genellikle eşleştirmelere Bayt olarak aktarılır ve doğrudan depolanırsa subgraph'ta `hex` formatında döndürülür (örn. blok ve işlem hashları). [Arweave Explorer](https://viewblock.io/arweave/) gibi blok gezginlerinde görüntülenenlerle eşleştirmek için eşleştirmelerinizde `base64` veya `base64 URL`-güvenli biçime dönüştürmek isteyebilirsiniz. +Veriler genellikle Bytes olarak eşleştirmelere aktarılır ve doğrudan kaydedilirse subgraph'te hex formatında (ör. blok ve işlem hash'leri) döner. [Arweave Explorer](https://viewblock.io/arweave/) gibi blok gezginlerinde görüntülenenlerle denkleştirmek için eşlemelerinizi `base64` veya `base64 URL`-safe biçimine dönüştürmek isteyebilirsiniz. -Aşağıdaki `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` yardımcı fonksiyonu kullanılabilir ve `graph-ts`'ye eklenecektir: +Aşağıdaki `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` yardımcı fonksiyonu kullanılabilir. Bu fonksiyon, `graph-ts`'e eklenecektir: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From b5168a1fe30bc3d23b2f761a0379e4803876d451 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:12 -0500 Subject: [PATCH 0557/1534] New translations arweave.mdx (Ukrainian) --- .../pages/uk/subgraphs/cookbook/arweave.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/arweave.mdx b/website/src/pages/uk/subgraphs/cookbook/arweave.mdx index dff298483612..6b54757440a0 100644 --- a/website/src/pages/uk/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/arweave.mdx @@ -92,9 +92,9 @@ Arweave data sources support two types of handlers: - `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. - +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. ## Визначення схеми @@ -103,7 +103,7 @@ Schema definition describes the structure of the resulting subgraph database and ## AssemblyScript Mappings -Обробники для виконання подій написані на мові [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). 
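As a minimal sketch of how these types are consumed in a mapping — it assumes the Arweave types are exposed under the `arweave` namespace of `@graphprotocol/graph-ts`, as in the example subgraph referenced on this page, and `BlockEntity` is a hypothetical entity that would have to be declared in `schema.graphql`:

```tsx
import { arweave } from '@graphprotocol/graph-ts'
// Hypothetical entity type generated by `graph codegen` from schema.graphql.
import { BlockEntity } from '../generated/schema'

export function handleBlock(block: arweave.Block): void {
  // indepHash is unique per Arweave block, so its hex form makes a natural ID.
  let entity = new BlockEntity(block.indepHash.toHexString())
  entity.save()
}
```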
@@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From 0208d4bc2920d5396cbc81b7720c40321b235537 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:13 -0500 Subject: [PATCH 0558/1534] New translations arweave.mdx (Chinese Simplified) --- .../pages/zh/subgraphs/cookbook/arweave.mdx | 120 +++++++++--------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/arweave.mdx b/website/src/pages/zh/subgraphs/cookbook/arweave.mdx index ae6a399817d8..24eafa5cdebe 100644 --- a/website/src/pages/zh/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/arweave.mdx @@ -17,32 +17,32 @@ Arweave 已经构建了许多库,用于将协议集成到许多不同的编程 ## Arweave子图是什么? -Graph 允许您构建称为“子图 ”的自定义开放 API。子图用于告诉索引人(服务器操作员) 在区块链上索引哪些数据,并保存在他们的服务器上,以便您能够在任何时候使用 [GraphQL](https://graphql.org/) 查询它。 +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). -[Graph节点](https://github.com/graphprotocol/graph-node) 现在能够在 Arweave 协议上索引数据。当前的集成只是索引 Arweave 作为一个区块链(区块和交易) ,它还没有索引存储的文件。 +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. ## 构建 Arweave 子图 为了能够构建和部署 Arweave 子图,您需要两个包: -1. `@graphprotocol/graph-cli` 高于0.30.2版本-这是一个用于构建和部署子图的命令行工具。[点击这里](https://www.npmjs.com/package/@graphprotocol/graph-cli)下载使用 `npm`。 -2. `@ graph protocol/graph-ts` 0.27.0以上版本-这是子图特定类型的库。[点击这里](https://www.npmjs.com/package/@graphprotocol/graph-ts)下载使用 `npm`。 +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## 子图的组成部分 一个子图有三个组成部分: -### 1. 数据源明细 - `subgraph.yaml` +### 1. Manifest - `subgraph.yaml` 定义感兴趣的数据源,以及如何处理它们。Arweave是一种新型数据源。 -### 2. 数据查询结构- `schema.graphql` +### 2. Schema - `schema.graphql` 在这里,您可以定义在使用 GraphQL 索引子图之后希望能够查询的数据。这实际上类似于 API 的模型,其中模型定义了请求主体的结构。 -[现有文档](/developing/creating-a-subgraph/#the-graphql-schema)涵盖了对 Arweave 子图的需求。 +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. AssemblyScript 映射 - `mapping.ts` +### 3. 
AssemblyScript Mappings - `mapping.ts` 这种逻辑决定了当有人与您正在监听的数据源进行交互时,应该如何检索和存储数据。数据将被翻译并根据您列出的模式进行存储。 @@ -53,9 +53,9 @@ $ graph codegen # 从清单中标识的模式文件生成类型 $ graph build # 从 AssemblyScript 文件生成 Web Assembly,并在 /build 文件夹中准备所有子图文件 ``` -## 子图数据源明细定义 +## 子图清单定义 -子图清单`subgraph.yaml` 标识子图的数据源、感兴趣的触发器以及应该响应这些触发器而运行的函数。下面是 Arweave 子图的子图清单示例: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersion: 0.0.5 @@ -82,28 +82,28 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave子图引入了一种新的数据源(`arweave`) +- Arweave subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave 数据源引入了一个可选的 source. owner 字段,它是 Arweave 钱包的公钥 Arweave 数据源支持两种类型的处理程序: -- `blockHandlers` 在每个新的 Arweave 区块上运行,不需要 source. owner。 -- `transactionHandlers` - 在数据源的`source.owner` 是所有者的每个交易上运行。目前, `transactionHandlers`需要一个所有者,如果用户想要处理所有交易,他们应该提供""作为 `source.owner` +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > Source.Owner 可以是所有者的地址,也可以是他们的公钥。 - +> > 交易是 Arweave permaweb 的构建区块,它们是终端用户创建的对象。 - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. -## 数据查询结构定义 +## 模式定义 -数据查询结构定义描述了生成的子图数据库的结构以及实体之间的关系,无需与原始数据源有关。[这里](/developing/creating-a-subgraph/#the-graphql-schema)有关于子图模式定义的更多细节。 +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript 映射 -处理事件的处理程序是用 [AssemblyScript](https://www.assemblyscript.org/) 编写的。 +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -146,9 +146,9 @@ class Transaction { } ``` -区块处理程序接收`Block`,而交易接收`Transaction`.。 +Block handlers receive a `Block`, while transactions receive a `Transaction`. -写 Arweave 子图的映射与写 Etherum 子图的映射非常相似。了解更多信息,请点击[这里](/developing/creating-a-subgraph/#writing-mappings)。 +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). ## Deploying an Arweave Subgraph in Subgraph Studio @@ -160,15 +160,15 @@ graph deploy --access-token ## 查询 Arweave 子图 -Arweave 子图的 GraphQL 端点由模式定义和现有的 API 接口决定。有关更多信息,请访问 [GraphQLAPI 文档](/subgraphs/querying/graphql-api/)。 +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
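Over HTTP the endpoint behaves like any GraphQL API. A hedged `curl` sketch — the URL shape is a placeholder to be replaced by the query URL Subgraph Studio shows for your deployment; `_meta` is part of graph-node's standard API, so this probe works without any schema assumptions:

```bash
# Placeholder URL — copy the real query endpoint from Subgraph Studio.
curl -s -X POST \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ _meta { block { number } } }"}' \
  'https://api.studio.thegraph.com/query/<id>/<subgraph>/<version>'
```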
## 示例子图 下面是一个子图的例子,以供参考: -- [Arweave 的子图示例](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## 常见问题 +## FAQ ### 子图可以索引 Arweave 和其他链吗? @@ -188,52 +188,52 @@ Source.owner可以是用户的公钥或账户地址。 ### 当前的加密格式是什么? -数据通常以字节的形式传递到映射中,如果直接存储字节,则以`十六进制`格式(例如,区块和和交易hashes)返回。您可能希望在映射中转换为 `base64`或 `base64 URL` 安全格式,以便与 [Arweave Explorer](https://viewblock.io/arweave/) 等区块浏览器中显示的内容相匹配。 +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). -可以使用以下 `bytesToBase64(字节: Uint8Array,urlSafe: boolean): string` 辅助函数,并将其添加到 `graph-ts`: +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From 77e7615198cbcc792a3f8ee24d9edc25731aa431 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:14 -0500 Subject: [PATCH 0559/1534] New translations arweave.mdx (Urdu (Pakistan)) --- .../pages/ur/subgraphs/cookbook/arweave.mdx | 120 +++++++++--------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/arweave.mdx b/website/src/pages/ur/subgraphs/cookbook/arweave.mdx index c54fa9627492..035056b7f5a6 100644 --- a/website/src/pages/ur/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/arweave.mdx @@ -12,37 +12,37 @@ title: بناۓ گئے سب گرافز آرویو(Arweave) پر آرویو(Arweave) نے پہلے ہی بہت سی کتابخانےاں تیار کی ہیں جو مختلف پروگرامنگ زبانوں میں پروٹوکول کو اندر ملانے کے لئے بنائی گئی ہیں۔ مزید معلومات کے لئے آپ یہ چیک کر سکتے ہیں: -- [آروکی (Arwiki)](https://arwiki.wiki/#/en/main) -- [آرویو(Arweave) وسائل](https://www.arweave.org/build) +- [Arwiki](https://arwiki.wiki/#/en/main) +- [Arweave Resources](https://www.arweave.org/build) ## آرویو(Arweave) سب گرافز کیا ہوتے ہیں؟ -گراف آپ کو 'سب گرافز' کے ذریعے مخصوص اوپن اے پی آئیز(APIs) بنانے کی اجازت دیتا ہے۔ سب گرافز کا استعمال انڈیکسرز (سرور آپریٹرز) کو بتانے کے لئے ہوتا ہے کہ کس بلاکچین پر کون سے ڈیٹا کو انڈیکس کرنا ہے اور انہیں اپنے سرورز میں محفوظ کرنا ہے تاکہ آپ کبھی بھی اس ڈیٹا کی استعلام کر سکیں، جسے آپ [GraphQL](https://graphql.org/) کے ذریعے کر سکتے ہیں۔ +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) اب آرویو(Arweave) پروٹوکول پر ڈیٹا انڈیکس کر سکتا ہے۔ موجودہ انٹیگریشن صرف آرویو(Arweave) کو بلاکچین (بلاک اور ٹرانزیکشنز) کے طور پر انڈیکس کرتی ہے، یہ ابھی تک ذخیرہ شدہ فائلز کو انڈیکس نہیں کر رہی ہے۔ +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. ## آرویو(Arweave) سب گراف بنانا آرویو کے سب گراف بنانے اور تعینات کرنے کے لئے،آپ کو دو پیکجوں کی ضرورت ہے: -1. `@graphprotocol/graph-cli` اوپر والے ورژن 0.30.2 - یہ سب گراف کی تعمیر اور تعیناتی کے لیے ایک کمانڈ لائن ٹول ہے۔ `npm` کا استعمال کرتے ہوئے ڈاؤن لوڈ کرنے کے لیے [یہاں کلک کریں](https://www.npmjs.com/package/@graphprotocol/graph-cli). -2. 
`@graphprotocol/graph-ts` اوپر والے ورژن 0.27.0 کے لیۓ.یہ سب گراف مخصوص اقسام کی لائبریری ہے۔`npm` استعمال کرکے ڈاؤن لوڈ کرنے کے لیے[یہاں کلک کریں](https://www.npmjs.com/package/@graphprotocol/graph-ts). +1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## سب گراف کے حصے سب گراف کے تین حصے ہیں: -### 1- ضاہر - `subgraph.yaml` +### 1. Manifest - `subgraph.yaml` دلچسپی کے ڈیٹا کے ذرایع کو بیان کرتا ہے،اور کیسے ان پر کاروائ کی جاۓ۔ آرویو ایک نئ طرح کا ڈیٹا کا ذریعہ ہے. -### 2- سکیما - `schema.graphql` +### 2. Schema - `schema.graphql` یہاں آپ بیان کرتے ہیں کے کونسا ڈیٹا آپ کے سب گراف کا کیوری گراف کیو ایل کا استعمال کرتے ہوۓ کر سکے۔یہ دراصل اے پی آی(API) کے ماڈل سے ملتا ہے،جہاں ماڈل درخواست کے جسم کے ڈھانچے کو بیان کرتا ہے. -آرویو سب گراف کے تقاضوں کا احاطہ [موجودہ دستاویزات](/developing/creating-a-subgraph/#the-graphql-schema) سے کیا جاتا ہے. +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### 3- اسمبلی اسکرپٹ کی نقشہ سازی - `mapping.ts` +### 3. AssemblyScript Mappings - `mapping.ts` یہ وہ منطق جو اس بات کا پتہ لگاتا ہے کے کیسے ڈیٹا کو بازیافت اور مہفوظ کیا جاۓ جب کوئ اس ڈیٹا کے ذخیرہ سے تعامل کرے جسے آپ سن رہے ہیں۔اس ڈیٹا کا ترجمہ کیا جاتا ہے اور آپ کے درج کردہ اسکیما کی بنیاد پر مہفوظ کیا جاتا ہے. @@ -55,7 +55,7 @@ $graph build # اسمبلی سکرپٹ فائلوں سے ویب اسمبلی ت ## سب گراف مینی فیسٹ کی تعریف -ظاہر سب گراف `subgraph.yaml` سب گراف کے ڈیٹا کے زرائع کا پتہ لگاتا ہے،دلچسپی کے محرکات, اور وہ افعال جو ان محرکات کے جواب میں چلائے جائیں۔ ذیل میں ملاحظہ کریں مثال ظاہر سب گراف ایک آرویو سب گراف کے لیے: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersion: 0.0.5 @@ -82,28 +82,28 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- آرویو سب گراف ایک نئی قسم کے ڈیٹا سورس (`arweave`) کو متعارف کراتے ہیں۔ +- Arweave subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - آرویو ڈیٹا کے ذرائع ایک اختیاری source.owner فیلڈ متعارف کراتے ہیں، جو آرویو والیٹ کی عوامی کلید ہے آرویو ڈیٹا کے ذرائع دو قسم کے ہینڈلرز کو سپورٹ کرتے ہیں: -- `blockHandlers` - ہر نئے آرویو بلاک پر چلتے ہیں۔ کسی source.owner کی ضرورت نہیں ہے. -- `transactionHandlers` - ہر اس ٹرانزیکشن پر چلائیں جہاں ڈیٹا سورس کا `source.owner` مالک ہو۔فی الحال `transactionHandlers` کے لیے ایک مالک درکار ہے، اگر صارف تمام لین دین پر کارروائی کرنا چاہتے ہیں تو انہیں "" بطور `source.owner` فراہم کرنا ہو گا +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > Source.owner مالک کا پتہ، یا ان کی عوامی کلید ہو سکتا ہے. 
- +> > ٹرانزیکشنز آرویو پرما ویب کے تعمیراتی بلاکس ہیں اور یہ آخری صارفین کے ذریعہ تخلیق کردہ اشیاء ہیں. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. ## اسکیما کی تعریف -سکیما کی تعریف نتیجے میں سب گراف ڈیٹا بیس کی ساخت اور اداروں کے درمیان تعلقات کو بیان کرتی ہے۔ یہ اصل ڈیٹا ماخذ کے بارے میں علمی ہے۔ ذیلی گراف اسکیما کی تعریف کے بارے میں مزید تفصیلات [یہاں](/developing/creating-a-subgraph/#the-graphql-schema) ہیں. +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). -## اسمبلی اسکرپٹ کی میپنگ +## اسمبلی اسکرپٹ سب میپنک -پروسیسنگ ایونٹس کے ہینڈلرز [اسمبلی اسکرپٹ](https://www.assemblyscript.org/) میں لکھے گئے ہیں. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -146,9 +146,9 @@ class Transaction { } ``` -بلاک ہینڈلرز کو ایک `Block` ملتا ہے، جب کہ لین دین کو `Transaction` ملتا ہے. +Block handlers receive a `Block`, while transactions receive a `Transaction`. -آرویو سب گراف کی میپنگ لکھنا ایتھریم سب گراف کی میپنگ لکھنے کے مترادف ہے۔ مزید معلومات کے لیے، [یہاں](/developing/creating-a-subgraph/#writing-mappings) کلک کریں. +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). ## Deploying an Arweave Subgraph in Subgraph Studio @@ -160,13 +160,13 @@ graph deploy --access-token ## آرویو سب گراف سے کیوری کرنا -آرویو سب گرافس کے لیے گراف کیو ایل اینڈ پوائنٹ کا تعین موجودہ API انٹرفیس کے ساتھ، اسکیما تعریف سے کیا جاتا ہے۔ مزید معلومات کے لیے براہ کرم [گراف کیو ایل API دستاویزات](/subgraphs/querying/graphql-api/) ملاحظہ کریں. +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## سب گراف کی مثال حوالہ کے لیے سب گراف کی ایک مثال یہ ہے: -- [آرویو کے لیے سب گراف کی مثال](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ @@ -188,52 +188,52 @@ Source.owner صارف کی عوامی کلید یا اکاؤنٹ ایڈریس ہ ### موجودہ خفیہ کاری کا فارمیٹ کیا ہے؟ -ڈیٹا کو عام طور پر میپنگ میں بائٹس کے طور پر منتقل کیا جاتا ہے، جسے براہ راست ذخیرہ کرنے کی صورت میں ذیلی گراف میں `hex` فارمیٹ (مثال کے طور پر بلاک اور ٹرلین دین ہیش) میں واپس کیا جاتا ہے۔ آپ اپنی میپنگ میں ایک `base64` یا `base64 URL`-محفوظ فارمیٹ میں تبدیل کرنا چاہیں گے،بلاک ایکسپلوررز جیسے [آرویو ایکسپلورر](https://viewblock.io/arweave/) میں دکھائی جانے والی چیزوں سے میل کرنے کے لیے. +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
-درج ذیل `bytesToBase64(bytes: Uint8Array، urlSafe: boolean): string` مددگار فنکشن استعمال کیا جا سکتا ہے، اور اسے `graph-ts` میں شامل کیا جائے گا: +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From be58d2bab9be941d66eb4e3ad12afb19d459d931 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:15 -0500 Subject: [PATCH 0560/1534] New translations arweave.mdx (Vietnamese) --- .../pages/vi/subgraphs/cookbook/arweave.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/arweave.mdx b/website/src/pages/vi/subgraphs/cookbook/arweave.mdx index b5f0521e9ca1..2372025621d1 100644 --- a/website/src/pages/vi/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/arweave.mdx @@ -92,9 +92,9 @@ Arweave data sources support two types of handlers: - `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > The source.owner can be the owner's address, or their Public Key. - +> > Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. ## Schema Definition @@ -168,7 +168,7 @@ Here is an example subgraph for reference: - [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## CÂU HỎI THƯỜNG GẶP +## FAQ ### Can a subgraph index Arweave and other chains? 
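The hunk that follows reformats the `bytesToBase64` helper. As a usage sketch — the `handleTx` signature follows the transaction-handler convention described above, and the helper is assumed to be defined in the same mapping file:

```tsx
import { arweave } from '@graphprotocol/graph-ts'

export function handleTx(tx: arweave.Transaction): void {
  // Bytes extends Uint8Array, so the raw id can be passed straight through.
  // `true` selects the URL-safe alphabet that Arweave explorers display.
  let readableId = bytesToBase64(tx.id, true)
  // ...store readableId on an entity declared in schema.graphql
}
```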
@@ -194,46 +194,46 @@ The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helpe ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets yet to write + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` From b3b9f515d1f74cc25dcb5939499e0b1b5adb4bff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:16 -0500 Subject: [PATCH 0561/1534] New translations arweave.mdx (Marathi) --- .../pages/mr/subgraphs/cookbook/arweave.mdx | 116 +++++++++--------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/arweave.mdx b/website/src/pages/mr/subgraphs/cookbook/arweave.mdx index 1c5300cce813..2b43324539b9 100644 --- a/website/src/pages/mr/subgraphs/cookbook/arweave.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/arweave.mdx @@ -13,36 +13,36 @@ Arweave प्रोटोकॉल विकसकांना कायमस अनेक वेगवेगळ्या प्रोग्रामिंग भाषांमध्ये प्रोटोकॉल समाकलित करण्यासाठी Arweave ने आधीच असंख्य लायब्ररी तयार केल्या आहेत. अधिक माहितीसाठी तुम्ही तपासू शकता: - [Arwiki](https://arwiki.wiki/#/en/main) -- [Arweave संसाधने](https://www.arweave.org/build) +- [Arweave Resources](https://www.arweave.org/build) ## Arweave Subgraphs काय आहेत? -आलेख तुम्हाला "सबग्राफ" नावाचे सानुकूल खुले API तयार करण्याची परवानगी देतो. ब्लॉकचेनवर कोणता डेटा इंडेक्स करायचा आहे हे इंडेक्सर्स (सर्व्हर ऑपरेटर) यांना सांगण्यासाठी सबग्राफचा वापर केला जातो आणि त्यांच्या सर्व्हरवर सेव्ह केला जातो जेणेकरून तुम्ही ते वापरून कधीही क्वेरी करू शकता वापरून [GraphQL](https://graphql.org/). +The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) आता Arweave प्रोटोकॉलवर डेटा अनुक्रमित करण्यास सक्षम आहे. सध्याचे एकत्रीकरण केवळ ब्लॉकचेन (ब्लॉक्स आणि व्यवहार) म्हणून Arweave अनुक्रमित करत आहे, ते अद्याप संग्रहित फायली अनुक्रमित करत नाही. +[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. ## Arweave Subgraph तयार करणे Arweave Subgraphs तयार आणि तैनात करण्यात सक्षम होण्यासाठी, तुम्हाला दोन पॅकेजेसची आवश्यकता आहे: -1. `@graphprotocol/graph-cli` वरील आवृत्ती 0.30.2 - हे सबग्राफ तयार करण्यासाठी आणि तैनात करण्यासाठी कमांड-लाइन साधन आहे. `npm` वापरून डाउनलोड करण्यासाठी [येथे क्लिक करा](https://www.npmjs.com/package/@graphprotocol/graph-cli). -2. `@graphprotocol/graph-ts` वरील आवृत्ती 0.27.0 - ही सबग्राफ-विशिष्ट प्रकारांची लायब्ररी आहे. `npm` वापरून डाउनलोड करण्यासाठी [येथे क्लिक करा](https://www.npmjs.com/package/@graphprotocol/graph-ts). +1. 
`@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. +2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. ## सबग्राफचे घटक सबग्राफचे तीन घटक आहेत: -### 1. मॅनिफेस्ट - `subgraph.yaml` +### 1. Manifest - `subgraph.yaml` स्वारस्य असलेल्या डेटा स्रोतांची व्याख्या करते आणि त्यांची प्रक्रिया कशी करावी. Arweave हा एक नवीन प्रकारचा डेटा स्रोत आहे. -### 2. स्कीमा - `schema.graphql` +### 2. Schema - `schema.graphql` GraphQL वापरून तुमचा सबग्राफ इंडेक्स केल्यानंतर तुम्ही कोणता डेटा क्वेरी करू इच्छिता ते येथे तुम्ही परिभाषित करता. हे प्रत्यक्षात API च्या मॉडेलसारखेच आहे, जेथे मॉडेल विनंती मुख्य भागाची रचना परिभाषित करते. -Arweave सबग्राफच्या आवश्यकता [विद्यमान दस्तऐवज](/developing/creating-a-subgraph/#the-graphql-schema) द्वारे कव्हर केल्या जातात. +The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. असेंबलीस्क्रिप्ट मॅपिंग - `mapping.ts` +### 3. AssemblyScript Mappings - `mapping.ts` जेव्हा तुम्ही ऐकत असलेल्या डेटा स्रोतांशी कोणीतरी संवाद साधते तेव्हा डेटा कसा पुनर्प्राप्त आणि संग्रहित केला जावा हे हे तर्कशास्त्र आहे. डेटा अनुवादित केला जातो आणि तुम्ही सूचीबद्ध केलेल्या स्कीमावर आधारित संग्रहित केला जातो. @@ -55,7 +55,7 @@ $ graph build # असेंबलीस्क्रिप्ट फायली ## सबग्राफ मॅनिफेस्ट व्याख्या -सबग्राफ मॅनिफेस्ट `subgraph.yaml` सबग्राफसाठी डेटा स्रोत, स्वारस्य ट्रिगर आणि त्या ट्रिगर्सच्या प्रतिसादात चालवल्या जाणार्‍या फंक्शन्स ओळखतो. अर्वेव्ह सबग्राफसाठी सबग्राफ मॅनिफेस्टच्या उदाहरणासाठी खाली पहा: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: ```yaml specVersion: 0.0.5 @@ -82,28 +82,28 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- आर्वीव्ह सबग्राफ नवीन प्रकारचे डेटा स्रोत सादर करतात (`arweave`) +- Arweave subgraphs introduce a new kind of data source (`arweave`) - The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` - Arweave डेटा स्रोत पर्यायी source.owner फील्ड सादर करतात, जी Arweave वॉलेटची सार्वजनिक की आहे Arweave डेटा स्रोत दोन प्रकारच्या हँडलरला समर्थन देतात: -- `blockHandlers` - प्रत्येक नवीन Arweave ब्लॉकवर चालवा. source.owner आवश्यक नाही. -- `transactionHandlers` - डेटा स्रोताचा `source.owner` मालक असेल अशा प्रत्येक व्यवहारावर चालवा. सध्या `transactionHandlers` साठी मालक आवश्यक आहे, जर वापरकर्त्यांना सर्व व्यवहारांवर प्रक्रिया करायची असेल तर त्यांनी हे `source.owner` म्हणून प्रदान केले पाहिजे +- `blockHandlers` - Run on every new Arweave block. No source.owner is required. +- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` > source.owner हा मालकाचा पत्ता किंवा त्यांची सार्वजनिक की असू शकतो. - +> > व्यवहार हे Arweave permaweb चे बिल्डिंग ब्लॉक्स आहेत आणि ते अंतिम वापरकर्त्यांनी तयार केलेल्या वस्तू आहेत. - +> > Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. 
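To make the catch-all case above concrete, here is a hedged manifest fragment; every value is a placeholder, and the empty-string owner is the documented way to match all transactions:

```yaml
# Sketch: a data source whose transaction handler sees every transaction,
# achieved by providing the empty string "" as source.owner.
source:
  owner: ''
mapping:
  transactionHandlers:
    - handler: handleTx # placeholder handler name
```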
## स्कीमा व्याख्या -स्कीमा व्याख्या परिणामी सबग्राफ डेटाबेसची रचना आणि संस्थांमधील संबंधांचे वर्णन करते. हे मूळ डेटा स्रोताचे अज्ञेय आहे. सबग्राफ स्कीमा व्याख्या [येथे](/developing/creating-a-subgraph/#the-graphql-schema) अधिक तपशील आहेत. +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## असेंबलीस्क्रिप्ट मॅपिंग -इव्हेंटवर प्रक्रिया करण्यासाठी हँडलर [AssemblyScript](https://www.assemblyscript.org/) मध्ये लिहिलेले आहेत. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -146,9 +146,9 @@ class Transaction { } ``` -ब्लॉक हँडलर्सना `ब्लॉक` मिळतो, तर व्यवहारांना `व्यवहार` मिळतो. +Block handlers receive a `Block`, while transactions receive a `Transaction`. -Arweave Subgraph चे मॅपिंग लिहिणे हे Ethereum Subgraph चे मॅपिंग लिहिण्यासारखेच आहे. अधिक माहितीसाठी, क्लिक करा [येथे](/developing/creating-a-subgraph/#writing-mappings). +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). ## Deploying an Arweave Subgraph in Subgraph Studio @@ -160,13 +160,13 @@ graph deploy --access-token ## प्रश्न करत आहे Arweave सबग्राफ -Arweave subgraphs साठी GraphQL एंडपॉइंट विद्यमान API इंटरफेससह स्कीमा व्याख्येद्वारे निर्धारित केला जातो. अधिक माहितीसाठी कृपया [GraphQL API दस्तऐवज](/subgraphs/querying/graphql-api/) ला भेट द्या. +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## उदाहरणे सबग्राफ संदर्भासाठी येथे एक उदाहरण उपग्राफ आहे: -- [Arweave साठी उदाहरण सबग्राफ](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ @@ -188,52 +188,52 @@ source.owner वापरकर्त्याची सार्वजनिक ### सध्याचे एन्क्रिप्शन स्वरूप काय आहे? -डेटा सामान्यतः मॅपिंगमध्ये बाइट्स म्हणून पास केला जातो, जो थेट संग्रहित केल्यास सबग्राफमध्ये `हेक्स` फॉरमॅटमध्ये (उदा. ब्लॉक आणि ट्रान्झॅक्शन हॅश) परत केला जातो. तुम्हाला कदाचित `बेस64` किंवा `बेस64 URL`-तुमच्या मॅपिंगमधील सुरक्षित फॉरमॅटमध्ये रूपांतरित करायचे असेल, जे ब्लॉक एक्सप्लोरर जसे की [अर्विव्ह एक्सप्लोरर](https://viewblock.io/arweave/) मध्ये प्रदर्शित केले जाते ते जुळण्यासाठी 1>. +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
-खालील `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` हेल्पर फंक्शन वापरले जाऊ शकते आणि ते `graph-ts` मध्ये जोडले जाईल: +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet;
+
+  let result = '', i: i32, l = bytes.length;
+  for (i = 2; i < l; i += 3) {
+    result += alphabet[bytes[i - 2] >> 2];
+    result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)];
+    result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)];
+    result += alphabet[bytes[i] & 0x3F];
+  }
+  if (i === l + 1) { // 1 octet yet to write
+    result += alphabet[bytes[i - 2] >> 2];
+    result += alphabet[(bytes[i - 2] & 0x03) << 4];
+    if (!urlSafe) {
+      result += "==";
+    }
+  }
+  if (i === l) { // 2 octets yet to write; padding is only appended when !urlSafe
+    result += alphabet[bytes[i - 2] >> 2];
+    result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)];
+    result += alphabet[(bytes[i - 1] & 0x0F) << 2];
+    if (!urlSafe) {
+      result += "=";
+    }
+  }
+  return result;
}
```

From 1681ca5524d211fee76ba53116d8dd50c572258c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:54:17 -0500
Subject: [PATCH 0562/1534] New translations arweave.mdx (Hindi)

---
 .../pages/hi/subgraphs/cookbook/arweave.mdx   | 122 +++++++++---------
 1 file changed, 61 insertions(+), 61 deletions(-)

diff --git a/website/src/pages/hi/subgraphs/cookbook/arweave.mdx b/website/src/pages/hi/subgraphs/cookbook/arweave.mdx
index 12bd2895b137..b51d9a5405bc 100644
--- a/website/src/pages/hi/subgraphs/cookbook/arweave.mdx
+++ b/website/src/pages/hi/subgraphs/cookbook/arweave.mdx
@@ -12,37 +12,37 @@ title: आरवीव पर सब-ग्राफ्र्स बनाना

अरवीव द्वारा पहले से ही कई लाइब्रेरी विभिन्न प्रोग्रामिंग भाषाओं में विकशित की गई हैं| अधिक जानकारी के लिए आप इनका रुख कर सकते हैं:

-- [अरविकी](https://arwiki.wiki/#/en/main)
-- [आरवीवे रिसोर्सेज](https://www.arweave.org/build)
+- [Arwiki](https://arwiki.wiki/#/en/main)
+- [Arweave Resources](https://www.arweave.org/build)

## आरवीवे सब ग्राफ्स क्या हैं?

-द ग्राफ की सहायता से आप कस्टम APIs बना सकते हैं जिन्हे सब ग्राफ्स कहा जाता है| सब ग्राफ्स का इस्तेमाल इंडेक्सर्स (सर्वर ऑपेरटर) को यह बताने के लिए इस्तेमाल होता है की ब्लॉकचेन पर कौन सा डाटा इंडेक्स करके अपने सर्वर पर सेव करना है जिससे आप [ GraphQL ](https://graphql.org/) का इस्तेमाल करके कभी भी डाटा क्वेरी कर सकें|
+The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/).

-ग्राफ नोड अब आरवीव प्रोटोकॉल पर डाटा इंडेक्स करने में सक्षम है| फिलाहल इंडेक्सिंग केवल आरवीवे को ब्लॉकचेन (ब्लॉक एवं ट्रांसैक्शन) के तौर पर हो रही है, फिलहाल यह फाइल्स को इंडेक्स नहीं कर रहा|
+[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet.

## एक आरवीव सब ग्राफ बनाना

आरवीवे पर सब ग्राफ बनाने के लिए हमे दो पैकेजेस की जरूरत है:

-1. `@graphprotocol/graph-cli` वर्जन 0.30.2 या ऊपर - यह एक कमांड-लाइन टूल है जिसका इस्तेमाल सब ग्राफ्स बनाने एवं डेप्लॉय करने के लिए होता है| `npm` से डाउनलोड करने के लिए [यहां क्लिक करें|](https://www.npmjs.com/package/@graphprotocol/graph-cli)
-2. `@graphprotocol/graph-ts` वर्जन 0.27.0 से ऊपर - यह विशिष्ट सब ग्राफ्स के लिए लाइब्रेरी है| `npm` से डाउनलोड करने के लिए [यहां क्लिक करें](https://www.npmjs.com/package/@graphprotocol/graph-ts) |
+1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. 
[Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`.
+2. `@graphprotocol/graph-ts` above version 0.27.0 - This is a library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`.

## सब ग्राफ के कॉम्पोनेन्ट

सब ग्राफ के तीन कॉम्पोनेन्ट होते हैं:

-### 1. मैनिफेस्ट - `subgraph.yaml`
+### 1. Manifest - `subgraph.yaml`

डाटा का स्रोत्र और उनको प्रोसेस करने के बारे में बताता है| आरवीव एक नए प्रकार का डाटा सोर्स है|

-### 2. स्कीमा - `schema.graphql`
+### 2. Schema - `schema.graphql`

यहाँ आप बताते हैं की आप कौन सा डाटा इंडेक्सिंग के बाद क्वेरी करना चाहते हैं| दरसअल यह एक API के मॉडल जैसा है, जहाँ मॉडल द्वारा रिक्वेस्ट बॉडी का स्ट्रक्चर परिभाषित किया जाता है|

-आरवीव सब ग्राफ्स के लिए अनिवार्य चीज़ें इस [डॉक्यूमेंटेशन](/developing/creating-a-subgraph/#the-graphql-schema) में है |
+The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema).

-### 3. असेंबली स्क्रिप्ट मप्पिंग्स - `mapping.ts`
+### 3. AssemblyScript Mappings - `mapping.ts`

यह किसी के द्वारा इस्तेमाल किये जा रहे डाटा सोर्स से डाटा को पुनः प्राप्त करने और स्टोर करने के लॉजिक को बताता है| डाटा अनुवादित होकर आपके द्वारा सूचीबद्ध स्कीमा के अनुसार स्टोर हो जाता है|

@@ -55,7 +55,7 @@ $ graph build # generates Web Assembly from the AssemblyScript files, and prepar

## सब ग्राफ मैनिफेस्ट की परिभाषा

-सब ग्राफ मैनिफेस्ट `subgraph.yaml` द्वारा सब-ग्राफ के डाटा सोर्स, ट्रिगर ऑफ़ इंटरेस्ट, और इन ट्रिगर के जवाब में सिस्टेमाल होने वाले फंक्शन्स की पहचान करता है| आरवीव सब-ग्राफ के लिए सब-ग्राफ मैनिफेस्ट का उदाहरण नीचे देखें:
+The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph:

```yaml
specVersion: 0.0.5
@@ -82,30 +82,30 @@ dataSources:
      - handler: handleTx # the function name in the mapping file
```

-- आरवीव सब-ग्राफ्स के नए प्रकार का डाटा सोर्स लाते हैं (`आरवीव`)
+- Arweave subgraphs introduce a new kind of data source (`arweave`)
- The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet`
- अरवीव डाटा सोर्स द्वारा एक वैकल्पिक source.owner फील्ड लाया गया, जो की एक आरवीव वॉलेट का मपब्लिक key है|

आरवीव डाटा सोर्स द्वारा दो प्रकार के हैंडलर्स उपयोग किये जा सकते हैं:

-`blockHandlers` - यह हर नए आरवीव ब्लॉक पर चलता है| किसी नए source.owner की जरूरत नहीं है|
-`transactionHandlers` - हर ट्रांसक्शन पर चलता है जहाँ डाटा सोर्स का मालिक `source.owner` हो| फिलहाल `transactionHandlers` के लिए एक ओनर की जरूरत होती है, यदि उपयोगकर्ताओं को सभी ट्रांसक्शन्स प्रोसेस करना है तो उन्हें "" `source.owner` की तरह देना(पास करना) होगा|
+- `blockHandlers` - Run on every new Arweave block. No source.owner is required.
+- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner`

> यहां source.owner ओनर का एड्रेस या उनका पब्लिक की हो सकता है|
-
+>
> ट्रांसक्शन आरवीव परमावेब के लिए निर्माण खंड (बिल्डिंग ब्लॉक्स) की तरह होते हैं और एन्ड-यूजर के द्वारा बनाये गए ऑब्जेक्ट होते हैं|
-
+>
> Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet.
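
As a complement to the manifest above, here is a minimal sketch of the `handleBlock` mapping it names. It is a sketch only: the `ArweaveBlock` entity, the `arweave` namespace import, and the schema field types are assumptions for illustration.

```typescript
// Hypothetical sketch — entity name, import path and field types are assumptions.
import { arweave, BigInt } from '@graphprotocol/graph-ts'
import { ArweaveBlock } from '../generated/schema'

export function handleBlock(block: arweave.Block): void {
  // Key the entity by the block's independent hash, stored in hex form.
  let entity = new ArweaveBlock(block.indepHash.toHexString())
  // height and timestamp arrive as u64; go through strings to keep full precision in BigInt.
  entity.height = BigInt.fromString(block.height.toString())
  entity.timestamp = BigInt.fromString(block.timestamp.toString())
  entity.save()
}
```
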
## स्कीमा की परिभाषा -स्कीमा डेफिनिशन के द्वारा बनने वाले सब-ग्राफ डेटाबेस और उनकी इकाइयों के बीच के रिश्ते के स्ट्रक्चर को बताता है| यह मूल डाटा सोर्स से अज्ञेय है| सब-ग्राफ स्कीमा परिभाषा पर अधिक जानकारी [यहाँ](/developing/creating-a-subgraph/#the-graphql-schema) हैं| +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## असेंबली स्क्रिप्ट मैप्पिंग्स -इवेंट्स को प्रोसेस करने के लिए हैंडलर्स [असेंबली स्क्रिप्ट](https://www.assemblyscript.org/) में लिखे गए हैं| +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). -Arweave इंडेक्सिंग '[AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/)' में Arweave-विशिष्ट डेटा प्रकारों को पेश करती है। +Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). ```tsx class Block { @@ -146,9 +146,9 @@ class Transaction { } ``` -ब्लॉक हैंडलर्स एक `ब्लॉक` प्राप्त करते हाँ, वहीँ ट्रांसक्शन्स `ट्रांसक्शन` हैं| +Block handlers receive a `Block`, while transactions receive a `Transaction`. -आरवीवे सब-ग्राफ की मैपिंग लिखना एथेरेयम के लिए सब-ग्राफ लिखने जैसा ही है| अधिक जानकारी [यहां](/developing/creating-a-subgraph/#writing-mappings) क्लिक करें| +Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). ## Deploying an Arweave Subgraph in Subgraph Studio @@ -160,15 +160,15 @@ graph deploy --access-token ## आरवीव सब-ग्राफ क्वेरी करना -आरवीव सब-ग्राफ्स के लिए GraphQL एन्ड पॉइंट्स स्कीमा की परिभाषा के अनुरूप, API इंटरफ़ेस के अनुसार बनाये जाते हैं| अधिक जानकारी के लिए [GraphQL API documentation](/subgraphs/querying/graphql-api/) पर जाएं| +The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## सब-ग्राफ के उदाहरण सहायता के एक सब-ग्राफ का उदाहरण -- [आरवीव के लिए उदाहरण सब-ग्राफ](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## सामान्य प्रश्न +## FAQ ### क्या एक सब-ग्राफ आरवीव और बाकी चेन्स को इंडेक्स कर सकता है? @@ -188,52 +188,52 @@ graph deploy --access-token ### वर्तमान एन्क्रिप्शन फॉर्मेट क्या है? -आमतौर पर डाटा को मैप्पिंग्स में बाइट्स की तरह पास किया जाता है, जो की यदि सब-ग्राफ में सीधे स्टोर किया जाये तो सब-ग्राफ में `hex` फॉर्मेट में वापस हो जाते हैं (उदाहरण: ब्लॉक और ट्रांसक्शन हाशेस). आप `base64` या `base64 URL` सेफ फॉर्मेट में कन्वर्ट करना चाह सकते हैं, ताकि ब्लॉक एक्सप्लोरर्स जैसे की [Arweave Explorer](https://viewblock.io/arweave/) में दिखाए जाने वाली जानकारी से मेल खा सकें| +Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). 
-निम्न `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` हेल्पर फंक्शन का उपयोग किया जा सकता है और यह `graph-ts` में जोड़ा जायेगा| +The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; + let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet;
+
+  let result = '', i: i32, l = bytes.length;
+  for (i = 2; i < l; i += 3) {
+    result += alphabet[bytes[i - 2] >> 2];
+    result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)];
+    result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)];
+    result += alphabet[bytes[i] & 0x3F];
+  }
+  if (i === l + 1) { // 1 octet yet to write
+    result += alphabet[bytes[i - 2] >> 2];
+    result += alphabet[(bytes[i - 2] & 0x03) << 4];
+    if (!urlSafe) {
+      result += "==";
+    }
+  }
+  if (i === l) { // 2 octets yet to write; padding is only appended when !urlSafe
+    result += alphabet[bytes[i - 2] >> 2];
+    result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)];
+    result += alphabet[(bytes[i - 1] & 0x0F) << 2];
+    if (!urlSafe) {
+      result += "=";
+    }
+  }
+  return result;
}
```

From 8904a208dc8cf3ce151f4743f64be9492951eee6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:54:18 -0500
Subject: [PATCH 0563/1534] New translations avoid-eth-calls.mdx (Romanian)

---
 website/src/pages/ro/subgraphs/cookbook/avoid-eth-calls.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/ro/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ro/subgraphs/cookbook/avoid-eth-calls.mdx
index a0613bf2b69f..d2c6381a93b2 100644
--- a/website/src/pages/ro/subgraphs/cookbook/avoid-eth-calls.mdx
+++ b/website/src/pages/ro/subgraphs/cookbook/avoid-eth-calls.mdx
@@ -1,5 +1,6 @@
---
title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls
+sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls"
---

## TLDR

From b4a353737034cd8058ad051f914da310c0ff4c1a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:54:19 -0500
Subject: [PATCH 0564/1534] New translations avoid-eth-calls.mdx (French)

---
 .../fr/subgraphs/cookbook/avoid-eth-calls.mdx | 45 ++++++++++---------
 1 file changed, 23 insertions(+), 22 deletions(-)

diff --git a/website/src/pages/fr/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/fr/subgraphs/cookbook/avoid-eth-calls.mdx
index a0613bf2b69f..5bde32efcfa2 100644
--- a/website/src/pages/fr/subgraphs/cookbook/avoid-eth-calls.mdx
+++ b/website/src/pages/fr/subgraphs/cookbook/avoid-eth-calls.mdx
@@ -1,24 +1,25 @@
---
-title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls
+title: Meilleure Pratique Subgraph 4 - Améliorer la Vitesse d'Indexation en Évitant les eth_calls
+sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls"
---

## TLDR

-`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`.
+Les `eth_calls` sont des appels qui peuvent être faits depuis un subgraph vers un nœud Ethereum. Ces appels prennent un temps considérable pour renvoyer des données, ralentissant ainsi l'indexation. Si possible, concevez des smart contracts pour émettre toutes les données dont vous avez besoin afin de ne pas avoir à utiliser des `eth_calls`.

-## Why Avoiding `eth_calls` Is a Best Practice
+## Pourquoi Éviter les `eth_calls` est une Bonne Pratique

-Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. +Les subgraphs sont optimisés pour indexer les données des événements émis par les smart contracts. Un subgraph peut également indexer les données provenant d'un `eth_call`, cependant, cela peut considérablement ralentir l'indexation du subgraph car les `eth_call` nécessitent de faire des appels externes aux smart contracts. La réactivité de ces appels dépend non pas du subgraph mais de la connectivité et de la réactivité du nœud Ethereum interrogé. En minimisant ou en éliminant les `eth_call` dans nos subgraphs, nous pouvons améliorer considérablement notre vitesse d'indexation. -### What Does an eth_call Look Like? +### À quoi ressemble un eth_call ? -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: +Les `eth_calls` sont souvent nécessaires lorsque les données requises pour un subgraph ne sont pas disponibles par le biais d'événements émis. Par exemple, considérons un scénario où un subgraph doit identifier si les tokens ERC20 font partie d'un pool spécifique, mais le contrat n'émet qu'un événement `Transfer` de base et n'émet pas un événement contenant les données dont nous avons besoin : ```yaml event Transfer(address indexed from, address indexed to, uint256 value); ``` -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: +Supposons que l'appartenance au pool des tokens soit déterminée par une variable d'état nommée `getPoolInfo`. Dans ce cas, nous devrions utiliser un `eth_call` pour interroger ces données : ```typescript import { Address } from '@graphprotocol/graph-ts' @@ -28,10 +29,10 @@ import { TokenTransaction } from '../generated/schema' export function handleTransfer(event: Transfer): void { let transaction = new TokenTransaction(event.transaction.hash.toHex()) - // Bind the ERC20 contract instance to the given address: + // Liez l'instance du contrat ERC20 à l'adresse donnée: let instance = ERC20.bind(event.address) - // Retrieve pool information via eth_call + // Récupérez les informations du pool via eth_call let poolInfo = instance.getPoolInfo(event.params.to) transaction.pool = poolInfo.toHexString() @@ -43,17 +44,17 @@ export function handleTransfer(event: Transfer): void { } ``` -This is functional, however is not ideal as it slows down our subgraph’s indexing. +Cela fonctionne, mais ce n'est pas idéal car cela ralentit l'indexation de notre subgraph. -## How to Eliminate `eth_calls` +## Comment Éliminer les `eth_calls` -Ideally, the smart contract should be updated to emit all necessary data within events. For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: +Idéalement, le smart contract devrait être mis à jour pour émettre toutes les données nécessaires dans les événements. 
Par exemple, modifier le smart contract pour inclure les informations du pool dans l'événement pourrait éliminer le besoin de `eth_calls`:

```
event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo);
```

-With this update, the subgraph can directly index the required data without external calls:
+Avec cette mise à jour, le subgraph peut indexer directement les données requises sans appels externes :

```typescript
import { Address } from '@graphprotocol/graph-ts'
import { TokenTransaction } from '../generated/schema'

export function handleTransferWithPool(event: TransferWithPool): void {
  let transaction = new TokenTransaction(event.transaction.hash.toHex())

  transaction.pool = event.params.poolInfo.toHexString()
  transaction.from = event.params.from.toHexString()
  transaction.to = event.params.to.toHexString()
  transaction.value = event.params.value

  transaction.save()
}
```

-This is much more performant as it has eliminated the need for `eth_calls`.
+Ceci est beaucoup plus performant car cela a éliminé le besoin de `eth_calls`.

-## How to Optimize `eth_calls`
+## Comment Optimiser les `eth_calls`

-If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`.
+Si la modification du smart contract n'est pas possible et que les `eth_calls` sont nécessaires, lisez "[Améliorer les Performances d'Indexation du Subgraph Facilement : Réduire les eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” par Simon Emanuel Schmid pour apprendre diverses stratégies sur la façon d'optimiser les `eth_calls`.

-## Reducing the Runtime Overhead of `eth_calls`
+## Réduire la Surcharge d'Exécution des `eth_calls`

-For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely.
+Pour les `eth_calls` qui ne peuvent pas être éliminés, la surcharge d'exécution qu'ils introduisent peut être minimisée en les déclarant dans le manifeste. Lorsque `graph-node` traite un bloc, il exécute tous les `eth_calls` déclarés en parallèle avant que les gestionnaires (handlers) soient exécutés. Les appels qui ne sont pas déclarés sont exécutés séquentiellement lorsque les gestionnaires sont exécutés. L'amélioration de la durée d'exécution vient du fait que les appels sont effectués en parallèle plutôt que séquentiellement - cela aide à réduire le temps total passé en appels mais ne l'élimine pas complètement.

-Currently, `eth_calls` can only be declared for event handlers. In the manifest, write
+Actuellement, les `eth_calls` ne peuvent être déclarés que pour les gestionnaires d'événements. Dans le manifeste, écrivez

```yaml
event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed)
handler: handleTransferWithPool
calls:
  ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to)
```

-The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`.
+La partie mise en évidence en jaune est la déclaration d'appel. 
La partie avant le deux-points est simplement une étiquette de texte utilisée uniquement pour les messages d'erreur. La partie après le deux-points a la forme `Contract[address].function(params)`. Les valeurs permises pour l'adresse et les paramètres sont `event.address` et `event.params.`. -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. +Le handler lui-même accède au résultat de ce `eth_call` exactement comme dans la section précédente en se liant au contrat et en effectuant l'appel. graph-node met en cache les résultats des `eth_calls` déclarés en mémoire et l'appel depuis le handler récupérera le résultat depuis ce cache en mémoire au lieu d'effectuer un appel RPC réel. -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. +Note : Les eth_calls déclarés ne peuvent être effectués que dans les subgraphs avec specVersion >= 1.2.0. ## Conclusion From 25ad1c7affa2034f7b70a1817534baf7b60576fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:20 -0500 Subject: [PATCH 0565/1534] New translations avoid-eth-calls.mdx (Spanish) --- website/src/pages/es/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/es/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/es/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/es/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/es/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 31bd0de3152109c6fe59f42475433a9781ad34f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:21 -0500 Subject: [PATCH 0566/1534] New translations avoid-eth-calls.mdx (Arabic) --- website/src/pages/ar/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ar/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ar/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/ar/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From ee4c5e4ddc0c852ac122f55dcb0554a5c8fce24b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:22 -0500 Subject: [PATCH 0567/1534] New translations avoid-eth-calls.mdx (Czech) --- website/src/pages/cs/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/cs/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/cs/subgraphs/cookbook/avoid-eth-calls.mdx index da01b6cad361..49367dc49eaa 100644 --- a/website/src/pages/cs/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Doporučený postup pro podgraf 4 - Zlepšení rychlosti indexování vyhnutím se eth_calls +sidebarTitle: "Subgraph Best Practice 4: 
Avoiding eth_calls" --- ## TLDR From 4f3a619182dcacdcb0e5d1be3d64ec51036e2c7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:23 -0500 Subject: [PATCH 0568/1534] New translations avoid-eth-calls.mdx (German) --- website/src/pages/de/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/de/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/de/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/de/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/de/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From b7bbfcb25bf9cc7a3aad5e6de3c2c3552e67432a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:24 -0500 Subject: [PATCH 0569/1534] New translations avoid-eth-calls.mdx (Italian) --- website/src/pages/it/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/it/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/it/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/it/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/it/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 6ce4b30d0b07110666784579bebef95149432753 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:25 -0500 Subject: [PATCH 0570/1534] New translations avoid-eth-calls.mdx (Japanese) --- website/src/pages/ja/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ja/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ja/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/ja/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 48079f8c8714edc3edc08f6abfaad986985af320 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:26 -0500 Subject: [PATCH 0571/1534] New translations avoid-eth-calls.mdx (Korean) --- website/src/pages/ko/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ko/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ko/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/ko/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 523ce24a6cd00acf6060f62acde23fcb00ec9798 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:27 -0500 Subject: [PATCH 0572/1534] New translations avoid-eth-calls.mdx (Dutch) --- website/src/pages/nl/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 
1 insertion(+) diff --git a/website/src/pages/nl/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/nl/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/nl/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 9cee667dd7097e0f89cab2afa90ea14c9851a864 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:28 -0500 Subject: [PATCH 0573/1534] New translations avoid-eth-calls.mdx (Polish) --- website/src/pages/pl/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/pl/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/pl/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/pl/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From a0423f6553ec1969c2d9e99d988c345408526fcf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:29 -0500 Subject: [PATCH 0574/1534] New translations avoid-eth-calls.mdx (Portuguese) --- .../pt/subgraphs/cookbook/avoid-eth-calls.mdx | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/pt/subgraphs/cookbook/avoid-eth-calls.mdx index fd40eb79c13c..86cd1db13946 100644 --- a/website/src/pages/pt/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Melhores Práticas de Subgraph Parte 4 - Como Melhorar a Velocidade da Indexação ao Evitar eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR @@ -99,18 +100,18 @@ Nota: `eth_calls` declaradas só podem ser feitas em subgraphs com specVersion m ## Conclusão -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. +O desempenho da indexação pode melhorar muito ao minimizar ou eliminar `eth_calls` nos nossos subgraphs. -## Subgraph Best Practices 1-6 +## Melhores Práticas para um Subgraph 1 – 6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Pruning: Reduza o Excesso de Dados do Seu Subgraph para Acelerar Queries](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Use o @derivedFrom para Melhorar a Resposta da Indexação e de Queries](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Melhore o Desempenho da Indexação e de Queries com o Uso de Bytes como IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Evite `eth-calls` para Acelerar a Indexação](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. 
[Simplifique e Otimize com Séries Temporais e Agregações](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Lance Hotfixes Mais Rápido com Enxertos](/subgraphs/cookbook/grafting-hotfix/) From f648070cca62926be39f2090986489c7621b60c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:30 -0500 Subject: [PATCH 0575/1534] New translations avoid-eth-calls.mdx (Russian) --- .../ru/subgraphs/cookbook/avoid-eth-calls.mdx | 65 ++++++++++--------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ru/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..8473688fd452 100644 --- a/website/src/pages/ru/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,24 +1,25 @@ --- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +title: Лучшая практика субграфа 4 — увеличение скорости индексирования за счет избегания eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- -## TLDR +## Краткое содержание -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. +`eth_calls` — это вызовы, которые могут выполняться из субграфа к ноде Ethereum. Эти вызовы требуют значительного количества времени для возврата данных, что замедляет индексирование. По возможности, проектируйте смарт-контракты так, чтобы они отправляли все необходимые Вам данные, чтобы избежать использования `eth_calls`. -## Why Avoiding `eth_calls` Is a Best Practice +## Почему избегание `eth_calls` является наилучшей практикой -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. +Субграфы оптимизированы для индексирования данных событий, которые исходят из смарт-контрактов. Субграф также может индексировать данные из `eth_call`, однако это значительно замедляет процесс индексирования, так как `eth_calls` требуют выполнения внешних вызовов к смарт-контрактам. Скорость реагирования этих вызовов зависит не от субграфа, а от подключения и скорости ответа ноды Ethereum, к которой отправлен запрос. Минимизируя или полностью исключая `eth_calls` в наших субграфах, мы можем значительно повысить скорость индексирования. -### What Does an eth_call Look Like? +### Что из себя представляет eth_call? -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: +`eth_calls` часто необходимы, когда данные, требуемые для субграфа, недоступны через сгенерированные события. 
Например, рассмотрим ситуацию, когда субграфу нужно определить, являются ли токены ERC20 частью определенного пула, но контракт генерирует только базовое событие `Transfer` и не создает событие, содержащее нужные нам данные: ```yaml event Transfer(address indexed from, address indexed to, uint256 value); ``` -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: +Предположим, что принадлежность токенов к пулу определяется переменной состояния с именем `getPoolInfo`. В этом случае нам потребуется использовать `eth_call`, чтобы запросить эти данные: ```typescript import { Address } from '@graphprotocol/graph-ts' @@ -28,10 +29,10 @@ import { TokenTransaction } from '../generated/schema' export function handleTransfer(event: Transfer): void { let transaction = new TokenTransaction(event.transaction.hash.toHex()) - // Bind the ERC20 contract instance to the given address: + // Привязка экземпляра контракта ERC20 к указанному адресу: let instance = ERC20.bind(event.address) - // Retrieve pool information via eth_call + // Получение информации о пуле с помощью eth_call let poolInfo = instance.getPoolInfo(event.params.to) transaction.pool = poolInfo.toHexString() @@ -43,17 +44,17 @@ export function handleTransfer(event: Transfer): void { } ``` -This is functional, however is not ideal as it slows down our subgraph’s indexing. +Это функционально, однако не идеально, так как замедляет индексирование нашего субграфа. -## How to Eliminate `eth_calls` +## Как устранить `eth_calls` -Ideally, the smart contract should be updated to emit all necessary data within events. For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: +В идеале смарт-контракт должен быть обновлён, чтобы эмитировать все необходимые данные внутри событий. Например, модификация смарт-контракта для включения информации о пуле в событие может устранить необходимость в `eth_calls`: ``` event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); ``` -With this update, the subgraph can directly index the required data without external calls: +С этим обновлением субграф может напрямую индексировать необходимые данные без внешних вызовов: ```typescript import { Address } from '@graphprotocol/graph-ts' @@ -72,17 +73,17 @@ export function handleTransferWithPool(event: TransferWithPool): void { } ``` -This is much more performant as it has eliminated the need for `eth_calls`. +Это значительно повышает производительность, так как устраняет потребность в `eth_calls`. -## How to Optimize `eth_calls` +## Как оптимизировать `eth_calls` -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. +Если изменение смарт-контракта невозможно, и `eth_calls` необходимы, прочитайте статью "[Как легко улучшить производительность индексирования субграфа: сокращение eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)" Саймона Эмануэля Шмида, чтобы изучить различные стратегии оптимизации `eth_calls`. 
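
One pattern in that spirit, sketched below under stated assumptions, is to cache the result of a successful call in an entity so that each address triggers at most one RPC round-trip. `PoolCache` is an assumed schema entity, and `try_getPoolInfo` stands in for the revert-safe variant that `graph codegen` would generate for the hypothetical `getPoolInfo` function used above.

```typescript
// Hedged sketch — `PoolCache` and the generated `try_getPoolInfo` binding are assumptions.
import { Address } from '@graphprotocol/graph-ts'
import { ERC20 } from '../generated/ERC20/ERC20'
import { PoolCache } from '../generated/schema'

export function lookupPool(contract: Address, holder: Address): string {
  let id = holder.toHexString()
  let cached = PoolCache.load(id)
  if (cached != null) {
    return cached.pool // cache hit: no RPC round-trip
  }
  // Cache miss: make the eth_call once and persist the result.
  let call = ERC20.bind(contract).try_getPoolInfo(holder)
  let entry = new PoolCache(id)
  entry.pool = call.reverted ? '' : call.value.toHexString()
  entry.save()
  return entry.pool
}
```
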
-## Reducing the Runtime Overhead of `eth_calls` +## Сокращение времени выполнения `eth_calls` -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. +Для тех `eth_calls`, которые нельзя устранить, накладные расходы на их выполнение можно минимизировать, объявив их в манифесте. Когда `graph-node` обрабатывает блок, все объявленные `eth_calls` выполняются параллельно до запуска обработчиков. Вызовы, не объявленные в манифесте, выполняются последовательно во время работы обработчиков. Улучшение производительности достигается за счет параллельного выполнения вызовов, а не последовательного, что помогает сократить общее время, затраченное на вызовы, но не устраняет его полностью. -Currently, `eth_calls` can only be declared for event handlers. In the manifest, write +В настоящее время `eth_calls` можно объявлять только для обработчиков событий. В манифесте нужно написать ```yaml event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) @@ -91,26 +92,26 @@ calls: ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) ``` -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. +Часть, выделенная желтым, — это объявление вызова. Часть до двоеточия — это текстовая метка, которая используется только в сообщениях об ошибках. Часть после двоеточия имеет форму `Contract[address].function(params)`. Допустимыми значениями для адреса и параметров являются `event.address` и `event.params.`. -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. +Обработчик сам получает результат этого `eth_call`, как и в предыдущем разделе, привязываясь к контракту и выполняя вызов. `graph-node` кеширует результаты объявленных `eth_calls` в памяти, а вызов из обработчика будет извлекать результат из этого кеша в памяти, вместо того чтобы выполнять фактический RPC-вызов. -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. +Примечание: Объявленные `eth_calls` могут быть выполнены только в субграфах с версией спецификации >= 1.2.0. -## Conclusion +## Заключение -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. +Вы можете значительно улучшить производительность индексирования, минимизируя или исключая `eth_calls` в своих субграфах. -## Subgraph Best Practices 1-6 +## Лучшие практики для субграфов 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Увеличение скорости запросов с помощью обрезки субграфов](/subgraphs/cookbook/pruning/) -2. 
[Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Улучшение индексирования и отклика запросов с использованием @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Улучшение индексирования и производительности запросов с использованием неизменяемых объектов и байтов в качестве идентификаторов](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Увеличение скорости индексирования путем избегания `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Упрощение и оптимизация с помощью временных рядов и агрегаций](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Использование переноса (графтинга) для быстрого развертывания исправлений](/subgraphs/cookbook/grafting-hotfix/) From f3e73d45ea64f905343b4135b5a5cb8e54a72d33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:31 -0500 Subject: [PATCH 0576/1534] New translations avoid-eth-calls.mdx (Swedish) --- website/src/pages/sv/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/sv/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/sv/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/sv/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 5c7e1a8c99a2ee5c51abf8259e4f656d15a97031 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:32 -0500 Subject: [PATCH 0577/1534] New translations avoid-eth-calls.mdx (Turkish) --- .../tr/subgraphs/cookbook/avoid-eth-calls.mdx | 65 ++++++++++--------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/tr/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..327df6c4263f 100644 --- a/website/src/pages/tr/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,24 +1,25 @@ --- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +title: Subgraph Örnek Uygulamalar 4 - eth_calls Kullanımından Kaçınarak Endeksleme Hızını Artırma +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- -## TLDR +## Özet -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. +`eth_calls`, bir subgraph'ten bir Ethereum düğümüne yapılan çağrılardır. Bu çağrıların veri döndürmesi ciddi miktarda zaman alır ve endekslemeyi yavaşlatır. Mümkünse akıllı sözleşmelerinizi ihtiyacınız olan tüm verileri yayacak şekilde tasarlayın. Böylece eth_calls kullanmanız gerekmez. 
-## Why Avoiding `eth_calls` Is a Best Practice +## `eth_calls` Kullanımından Kaçınmanın Önemi -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. +Subgraph'ler, akıllı sözleşmelerden yayılan olay verilerini endekslemek için optimize edilmiştir. Subgraph'ler bir `eth_call` üzerinden gelen verileri de endeksleyebilir. Ancak, `eth_calls`'ın akıllı sözleşmelere harici çağrılar gerektirmesi nedeniyle, bu durum endekslemeyi önemli ölçüde yavaşlatabilir. Bu çağrıların yanıt verme süresi, subgraph'ten ziyade sorgulanan Ethereum düğümünün bağlantısına ve yanıt hızına bağlıdır. Subgraph'lerimizde `eth_calls`ı en aza indirerek veya ortadan kaldırarak, endeksleme hızımızı önemli ölçüde artırabiliriz. -### What Does an eth_call Look Like? +### Bir `eth_call` Nasıl Görünür? -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: +`eth_calls`, gerekli veriler olaylar aracılığıyla sağlanmadığında durumlarda genellikle gereklidir. Örneğin, bir subgraph'in ERC20 token'larının belirli bir havuza ait olup olmadığını belirlemesi gerektiğini, ancak sözleşmenin yalnızca temel bir `Transfer` olayı yaydığını ve ihtiyacımız olan verileri içeren bir olay yaymadığını varsayalım: ```yaml event Transfer(address indexed from, address indexed to, uint256 value); ``` -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: +Diyelim ki token'ların havuz üyeliği, `getPoolInfo` adlı bir durum değişkeni ile belirleniyor. Bu verileri sorgulamak için şu tarz bir `eth_call` kullanmamız gerekir: ```typescript import { Address } from '@graphprotocol/graph-ts' @@ -28,10 +29,10 @@ import { TokenTransaction } from '../generated/schema' export function handleTransfer(event: Transfer): void { let transaction = new TokenTransaction(event.transaction.hash.toHex()) - // Bind the ERC20 contract instance to the given address: + // Belirtilen adrese ERC20 sözleşme örneğini bağla: let instance = ERC20.bind(event.address) - // Retrieve pool information via eth_call + // eth_call aracılığıyla havuz bilgilerini al let poolInfo = instance.getPoolInfo(event.params.to) transaction.pool = poolInfo.toHexString() @@ -43,17 +44,17 @@ export function handleTransfer(event: Transfer): void { } ``` -This is functional, however is not ideal as it slows down our subgraph’s indexing. +Bu kod işlevsel olacaktır; ancak subgraph'imizin endekslenmesini yavaşlattığı için ideal değildir. -## How to Eliminate `eth_calls` +## `eth_calls`'ı Ortadan Kaldırma -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: +İdeal olarak, akıllı sözleşme gerekli tüm verileri olaylar içinde yayacak şekilde güncellenmelidir. Örneğin, havuz bilgilerini olaya dahil edecek şekilde akıllı sözleşmeyi değiştirmek, `eth_calls` ihtiyacını ortadan kaldırabilir: ``` event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); ``` -With this update, the subgraph can directly index the required data without external calls: +Bu güncellemeyle, subgraph harici çağrılara ihtiyaç duymadan gerekli verileri doğrudan endeksleyebilir: ```typescript import { Address } from '@graphprotocol/graph-ts' @@ -72,17 +73,17 @@ export function handleTransferWithPool(event: TransferWithPool): void { } ``` -This is much more performant as it has eliminated the need for `eth_calls`. +Bu metot `eth_calls` ihtiyacını ortadan kaldırdığı için çok daha verimlidir. -## How to Optimize `eth_calls` +## `eth_calls`'ı Optimize Etme -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. +Eğer akıllı sözleşmeyi değiştirmek mümkün değilse ve `eth_calls` kullanmak gerekli ise, Simon Emanuel Schmid tarafından yazılan “[Subgraph Endeksleme Performansını Kolayca İyileştirin: eth_calls'ı Azaltın](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” makalesini okuyarak eth_calls'i optimize etmenin çeşitli stratejilerini öğrenebilirsiniz. -## Reducing the Runtime Overhead of `eth_calls` +## `eth_call` Çağrılarının Fazla Çalışma Yükünü Azaltma -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. +Kaldırılamayan `eth_calls`'lar manifesto içinde deklare edilerek sebep oldukları çalışma zamanı yükü en aza indirilebilir. `graph-node` bir bloğu işlerken, belirtilen tüm eth_calls'ı işleyiciler çalıştırılmadan önce paralel olarak gerçekleştirir. Deklare edilmeyen çağrılar ise işleyiciler çalıştırıldığında sıralı olarak yürütülür. Paralel çağrılar, çağrılara harcanan toplam süreyi azaltmaya yardımcı olur; ancak bu süreyi tamamen ortadan kaldıramaz. -Currently, `eth_calls` can only be declared for event handlers. In the manifest, write +Şu anda `eth_calls` yalnızca olay işleyicileri için deklare edilebilir. Manifesto içerisinde şu şekilde yazılabilir: ```yaml event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) @@ -91,26 +92,26 @@ calls: ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) ``` -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. +Sarıyla vurgulanan bölüm çağrı deklarasyonudur. 
İki nokta üst üste öncesindeki kısım yalnızca hata mesajları için kullanılan bir metin etiketidir. İki noktadan sonraki kısım ise `Contract[address].function(params)` formundadır. Adres ve parametreler için izin verilen değerler `event.address` ve `event.params.` şeklindedir. -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. +İşleyici, bu `eth_call` sonucuna bir önceki bölümde olduğu gibi sözleşmeye bağlanarak ve çağrıyı yaparak erişir. graph-node, deklare edilen `eth_calls` sonuçlarını bellekte önbelleğe alır. İşleyiciden yapılan çağrı, sonuçları gerçek bir RPC çağrısı yapıp almak yerine, önbellekten alır. -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. +Not: Deklare edilen `eth_calls`, yalnızca specVersion >= 1.2.0 olan subgraph'lerde kullanılabilir. -## Conclusion +## Sonuç -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. +Subgraph'lerinizde `eth_calls`'ı en aza indirerek endeksleme performansını önemli ölçüde artırabilirsiniz. -## Subgraph Best Practices 1-6 +## Subgraph Örnek Uygulamalar 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Subgraph Budama ile Sorgu Hızını İyileştirin](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [@derivedFrom Kullanarak Endeksleme ve Sorgu Yanıt Hızını Artırın](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Değişmez Varlıklar ve Bytes ID'ler Kullanarak Endeksleme ve Sorgu Performansını Artırın](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Endeksleme Hızını `eth_calls`'den Kaçınarak İyileştirin](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Zaman Serileri ve Bütünleştirme ile Basitleştirin ve Optimize Edin](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Hızlı Düzeltme Dağıtımı için Aşılama Kullanın](/subgraphs/cookbook/grafting-hotfix/) From 255b0f72ae71c2e2199305d1b97a6118f01f790b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:33 -0500 Subject: [PATCH 0578/1534] New translations avoid-eth-calls.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/uk/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/uk/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/uk/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From fc43c8f1e0b4930d68ddaba9b61f5aad38e7dad2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:34 -0500 Subject: [PATCH 0579/1534] New translations avoid-eth-calls.mdx (Chinese Simplified) --- website/src/pages/zh/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/zh/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/zh/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/zh/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From a870c22aedab176e449d6656eaf0e4f6bf01a357 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:35 -0500 Subject: [PATCH 0580/1534] New translations avoid-eth-calls.mdx (Urdu (Pakistan)) --- website/src/pages/ur/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ur/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ur/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/ur/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 3c9ff68984c2cd04e7d7cc3ba48cd59e5f54c93d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:36 -0500 Subject: [PATCH 0581/1534] New translations avoid-eth-calls.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/vi/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/vi/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/vi/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 6aaf19a39f29d8aeda759ab125e2e0711ed52c64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:37 -0500 Subject: [PATCH 0582/1534] New translations avoid-eth-calls.mdx (Marathi) --- 
website/src/pages/mr/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/mr/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/mr/subgraphs/cookbook/avoid-eth-calls.mdx index a0613bf2b69f..d2c6381a93b2 100644 --- a/website/src/pages/mr/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 8e9046d1b2e659e3dc6baaf7c5dec2d6cb09b333 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:38 -0500 Subject: [PATCH 0583/1534] New translations avoid-eth-calls.mdx (Hindi) --- website/src/pages/hi/subgraphs/cookbook/avoid-eth-calls.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/hi/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/hi/subgraphs/cookbook/avoid-eth-calls.mdx index 5e86a234262a..b8a05ba59fe3 100644 --- a/website/src/pages/hi/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: सबग्राफ सर्वोत्तम प्रथा 4 - eth_calls से बचकर अनुक्रमण गति में सुधार करें +sidebarTitle: "Subgraph Best Practice 4: Avoiding eth_calls" --- ## TLDR From 39a0d20c64c3f305d3ff2cf5121478052b93086d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:39 -0500 Subject: [PATCH 0584/1534] New translations cosmos.mdx (Romanian) --- website/src/pages/ro/subgraphs/cookbook/cosmos.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/cosmos.mdx b/website/src/pages/ro/subgraphs/cookbook/cosmos.mdx index dddfbff521d0..5ccf1025ca47 100644 --- a/website/src/pages/ro/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/cosmos.mdx @@ -6,7 +6,7 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## What are Cosmos subgraphs? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. There are four types of handlers supported in Cosmos subgraphs: @@ -18,9 +18,9 @@ There are four types of handlers supported in Cosmos subgraphs: Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. 
- +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. From a5286981f75b51d6b80bb76395544aa505b2b6fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:40 -0500 Subject: [PATCH 0585/1534] New translations cosmos.mdx (French) --- .../pages/fr/subgraphs/cookbook/cosmos.mdx | 108 +++++++++--------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/cosmos.mdx b/website/src/pages/fr/subgraphs/cookbook/cosmos.mdx index 9c3a34c52a37..962fc4bd1758 100644 --- a/website/src/pages/fr/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## Que sont les subgraphs de Cosmos ? -The Graph permet aux développeurs de traiter les événements de la blockchain et de rendre les données résultantes facilement disponibles via une API GraphQL publique, connue sous le nom de subgraph. Par exemple : [Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable de traiter les événements Cosmos, ce qui signifie que les développeurs peuvent désormais construire des subgraphs pour indexer facilement les événements sur cette chaîne. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. Il existe quatre types de gestionnaires pris en charge dans les subgraphs de Cosmos : -- Les **gestionnaires de blocs** s'exécutent chaque fois qu'un nouveau bloc est ajouté à la chaîne. -- Les **gestionnaires d'événements** s'exécutent lorsqu'un événement spécifique est émis. -- Les **gestionnaires d'événements** s'exécutent lorsqu'un événement spécifique est émis. -- Les **gestionnaires de messages** s'exécutent lorsqu'un message spécifique apparaît. +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -Basé sur la [documentation officielle de Cosmos](https://docs.cosmos.network/) : +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. 
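To make the four handler kinds concrete, here is a minimal sketch of the corresponding AssemblyScript entry points. It assumes the `cosmos` types shipped with `@graphprotocol/graph-ts` and reuses the handler names from the example manifest shown later in this guide; the empty bodies are placeholders, not a definitive implementation.

```typescript
import { cosmos } from '@graphprotocol/graph-ts'

// Runs once for every block appended to the chain.
export function handleNewBlock(block: cosmos.Block): void {}

// Runs whenever a `rewards` event is emitted.
export function handleReward(data: cosmos.EventData): void {}

// Runs for every transaction.
export function handleTransaction(data: cosmos.TransactionData): void {}

// Runs for every `/cosmos.staking.v1beta1.MsgDelegate` message.
export function handleMsgDelegate(data: cosmos.MessageData): void {}
```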
Même si toutes les données sont accessibles avec un gestionnaire de blocs, des gestionnaires tiers permettent aux développeurs de subgraphs de traiter les données de manière beaucoup plus précise. @@ -37,45 +37,45 @@ Même si toutes les données sont accessibles avec un gestionnaire de blocs, des La définition d'un subgraph comporte trois éléments clés : -**subgraph.yaml** : un fichier YAML contenant le manifeste du subgraph, qui identifie les événements à suivre et la façon de les traiter. +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql** : un schéma GraphQL qui définit quelles données sont stockées pour votre subgraph, et comment les interroger via GraphQL. +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -**Mappings AssemblyScript** : Code [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) qui a traduit les données de la blockchain vers les entités définies dans votre schéma. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### Définition du manifeste du subgraph -Le manifeste du subgraph (`subgraph.yaml`) identifie les sources de données du subgraph, les déclencheurs d'intérêt et les fonctions (`handlers`) qui doivent être exécutées en réponse à ces déclencheurs. Vous trouverez ci-dessous un exemple de manifeste de subgraph pour un subgraph Cosmos : +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml -version spec: 0.0.5 -description: Exemple de subgraph Cosmos +version spec : 0.0.5 +description : Exemple de subgraph Cosmos schéma: - fichier: ./schema.graphql # lien vers le fichier de schéma + fichier : ./schema.graphql # lien vers le fichier de schéma les sources de données: - - genre: cosmos - nom: CosmosHub - réseau: cosmoshub-4 # Cela changera pour chaque blockchain basée sur le cosmos. Dans ce cas, l’exemple utilise le mainnet Cosmos Hub. - source: - startBlock: 0 # Requis pour Cosmos, définissez-le sur 0 pour démarrer l'indexation à partir de la genèse de la chaîne - cartographie: - Version api: 0.0.7 - langage: wasm/assemblyscript - gestionnaires de blocs: - - handler: handleNewBlock # le nom de la fonction dans le fichier de mappage - Gestionnaires d'événements: - - event: récompenses # le type d'événement qui sera géré - handler: handleReward # le nom de la fonction dans le fichier de mappage - Gestionnaires de transactions: - - handler: handleTransaction # le nom de la fonction dans le fichier de mappage - Gestionnaires de messages: - - message: /cosmos.staking.v1beta1.MsgDelegate # le type d'un message - handler: handleMsgDelegate # le nom de la fonction dans le fichier de mappage - fichier: ./src/mapping.ts # lien vers le fichier avec les mappages Assemblyscript + - genre : cosmos + nom : CosmosHub + réseau : cosmoshub-4 # Cela changera pour chaque blockchain basée sur le cosmos. Dans ce cas, l’exemple utilise le mainnet Cosmos Hub. 
+ source: + startBlock : 0 # Requis pour Cosmos, définissez-le sur 0 pour démarrer l'indexation à partir de la genèse de la chaîne + cartographie : + Version api : 0.0.7 + langage : wasm/assemblyscript + gestionnaires de blocs : + - handler: handleNewBlock # le nom de la fonction dans le fichier de mappage + Gestionnaires d'événements : + - event : récompenses # le type d'événement qui sera géré + handler: handleReward # le nom de la fonction dans le fichier de mappage + Gestionnaires de transactions : + - handler: handleTransaction # le nom de la fonction dans le fichier de mappage + Gestionnaires de messages : + - message : /cosmos.staking.v1beta1.MsgDelegate # le type d'un message + handler : handleMsgDelegate # le nom de la fonction dans le fichier de mappage + fichier : ./src/mapping.ts # lien vers le fichier avec les mappages Assemblyscript ``` -- Les subgraphs cosmos introduisent un nouveau `type` de source de données (`cosmos`). -- Le `réseau` doit correspondre à une chaîne de l'écosystème Cosmos. Dans l’exemple, le mainnet Cosmos Hub est utilisé. +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### Définition de schéma @@ -83,14 +83,14 @@ Schema definition describes the structure of the resulting subgraph database and ### Cartographies AssemblyScript -Les gestionnaires pour le traitement des événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). ```tsx class Block { header: Header - preuve: Liste de preuves + evidence: EvidenceList resultBeginBlock: ResponseBeginBlock resultEndBlock: ResponseEndBlock transactions: Array @@ -165,30 +165,30 @@ class Any { Chaque structure de type de gestionnaire transmise en tant qu'argument à une fonction de mappage. -- Les gestionnaires de blocs reçoivent le type `Block`. -- Les gestionnaires d'événements recevront le type `EventData`. -- Les gestionnaires de transactions recevront le type `TransactionData`. -- Les gestionnaires de messages recevront le type `MessageData`. +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. +- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -En tant que partie de `MessageData`, le gestionnaire de messages reçoit un contexte de transaction, qui contient les informations les plus importantes sur une transaction qui englobe un message. Le contexte de transaction est également disponible dans le type `EventData`, mais uniquement lorsque l'événement correspondant est associé à une transaction. En outre, tous les gestionnaires reçoivent une référence à un bloc (`HeaderOnlyBlock`). +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). 
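As an illustration of that context, the sketch below reads the transaction hash and gas usage from the transaction context, and the height from the block reference. This is a minimal sketch, assuming the `cosmos` types and the `tx`, `block`, and `message` field names as exposed by `@graphprotocol/graph-ts`; the message payload itself would still need to be decoded, as described below.

```typescript
import { cosmos, log } from '@graphprotocol/graph-ts'

export function handleMsgDelegate(data: cosmos.MessageData): void {
  // Transaction context that wraps the message: hash, result code, gas usage.
  let txHash = data.tx.hash.toHexString()
  let gasUsed = data.tx.gasUsed

  // Every handler also receives a block reference (HeaderOnlyBlock).
  let height = data.block.header.height

  log.info('MsgDelegate in tx {} at height {} (gas used: {})', [txHash, height.toString(), gasUsed.toString()])

  // `data.message` is still a serialized, chain-specific payload; decode it
  // with the matching Protocol Buffers definition before reading its fields.
}
```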
-Vous trouverez la liste complète des types pour l'intégration Cosmos [ici](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### Décodage des messages It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -Un exemple de décodage des données d'un message dans un subgraph peut être trouvé [ici](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Création et construction d'un subgraph Cosmos -La première étape avant de commencer à écrire les mappings du subgraphs est de générer les liaisons de type basées sur les entités qui ont été définies dans le fichier schéma du subgraph (`schema.graphql`). Cela permettra aux fonctions de mappage de créer de nouveaux objets de ces types et de les enregistrer dans le magasin. Ceci est fait en utilisant la commande CLI `codegen` : +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: ```bash $ codegen graph ``` -Une fois que le mapping est prêt, le subgraph peut être construit. Cette étape mettra en évidence toute erreur que le manifeste ou le mapping pourraient avoir. Un subgraph doit être construit avec succès afin d'être déployé sur Graph Node. Ceci est fait en utilisant la commande CLI `build` : +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: ```bash $ construction de graph @@ -196,24 +196,24 @@ $ construction de graph ## Déploiement d'un subgraph Cosmos -Une fois votre subgraph créé, vous pouvez le déployer en utilisant la commande CLI `graph deploy` : +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: **Subgraph Studio** -Visit the Subgraph Studio to create a new subgraph. +Visitez Subgraph Studio pour créer un nouveau subgraph. ```bash -graph deploy subgraph-name +graph deploy nom-du-subgraph ``` **Local Graph Node (based on default configuration):** ```bash -graph create subgraph-name --node http://localhost:8020 +graph create nom-du-subgraph --node http://localhost:8020 ``` ```bash -graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001 +graph deploy nom-du-subgraph --node http://localhost:8020/ --ipfs http://localhost:5001 ``` ## Interroger un subgraph de Cosmos @@ -234,7 +234,7 @@ Cosmos Hub mainnet is `cosmoshub-4`. 
Cosmos Hub current testnet is `theta-testne ### Osmosis -> Osmosis support in Graph Node and on Subgraph Studio is in beta: please contact the graph team with any questions about building Osmosis subgraphs! +> La prise en charge d'Osmosis dans Graph Node et sur Subgraph Studio est en bêta : veuillez contacter l'équipe de The Graph pour toute question sur la création de subgraphs Osmosis ! #### Qu’est-ce que l’osmose ? @@ -246,7 +246,7 @@ Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. ## Exemples de subgraphs -Here are some example subgraphs for reference: +Voici quelques exemples de subgraphs pour référence : [Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) From 7c1af55119fe5d706fd97a82b71ebd7691d5f242 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:40 -0500 Subject: [PATCH 0586/1534] New translations cosmos.mdx (Spanish) --- .../pages/es/subgraphs/cookbook/cosmos.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/cosmos.mdx b/website/src/pages/es/subgraphs/cookbook/cosmos.mdx index e5f07a8ccce4..3e8c35df064f 100644 --- a/website/src/pages/es/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/es/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## ¿Qué son los subgrafos de Cosmos? -The Graph permite a los developers procesar eventos de blockchain y hacer que los datos resultantes estén fácilmente disponibles a través de una API GraphQL abierta, conocida como subgrafo. [Graph Node](https://github.com/graphprotocol/graph-node) ahora puede procesar eventos de Cosmos, lo que significa que los developers de Cosmos ahora pueden crear subgrafos para indexar fácilmente eventos on-chain. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. Hay cuatro tipos de handlers admitidos en los subgrafos de Cosmos: -- **Block handlers** se ejecutan cada vez que se agrega un nuevo bloque a la cadena. -- **Event handlers** se ejecutan cuando se emite un evento específico. -- Los **handlers de transacciones** se ejecutan cuando se produce una transacción. -- **Message handlers** se ejecutan cuando ocurre un mensaje específico. +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -Basado en la [documentación oficial de Cosmos](https://docs.cosmos.network/): +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. 
- +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Aunque se puede acceder a todos los datos con un handler de bloques, otros handlers permiten a los developers de subgrafos procesar datos de una manera mucho más granular. @@ -37,15 +37,15 @@ Aunque se puede acceder a todos los datos con un handler de bloques, otros handl Hay tres partes clave cuando se trata de definir un subgrafo: -**subgraph.yaml**: un archivo YAML que contiene el manifiesto del subgrafo, que identifica qué eventos rastrear y cómo procesarlos. +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**: un esquema de GraphQL que define qué datos se almacenan para su subgrafo y cómo consultarlos a través de GraphQL. +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -**Asignaciones AssemblyScript**: código [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) que traduce los datos de la blockchain a las entidades definidas en tu esquema. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### Definición de manifiesto del subgrafo -El manifiesto del subgrafo (`subgraph.yaml`) identifica las fuentes de datos para el subgrafo, los disparadores de interés y las funciones (`handlers`) que deben ejecutarse en respuesta a esos disparadores. Consulte a continuación un manifiesto de subgrafo de ejemplo para un subgrafo de Cosmos: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 @@ -74,8 +74,8 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Los subgrafos de Cosmos introducen un nuevo `tipo` de origen de datos (`cosmos`). -- El `network` debe corresponder a una cadena en el ecosistema Cosmos. En el ejemplo, se usa la mainnet de Cosmos Hub. +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### Definición de esquema @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### Asignaciones de AssemblyScript -Los controladores para procesar eventos están escritos en [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -165,30 +165,30 @@ class Any { Cada tipo de handler viene con su propia estructura de datos que se pasa como argumento a una función de mapping. -- Los handlers de bloques reciben el tipo `Block`. -- Los handlers de eventos reciben el tipo `EventData`. -- Los handlers de transacciones reciben el tipo `TransactionData`. -- Los handlers de mensajes reciben el tipo `MessageData`. +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. 
+- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -Como parte de `MessageData`, el message handler recibe un contexto de transacción, que contiene la información más importante sobre una transacción que abarca un mensaje. El contexto de transacción también está disponible en el tipo `EventData`, pero solo cuando el evento correspondiente está asociado con una transacción. Además, todos los controladores reciben una referencia a un bloque (`HeaderOnlyBlock`). +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -Puedes encontrar una lista completa de los tipos para la integración Cosmos aquí [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### Decodificación de mensajes It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -Un ejemplo de cómo decodificar los datos de un mensaje en un subgrafo se puede encontrar [aquí](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Crear y construir un subgrafo de Cosmos -El primer paso antes de comenzar a escribir las asignaciones de subgrafos es generar los enlaces de tipos en función de las entidades que se han definido en el archivo de esquema de subgrafos (`schema.graphql`). Esto permitirá que las funciones de mapeo creen nuevos objetos de esos tipos y los guarden en la tienda. Esto se hace usando el comando CLI `codegen`: +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: ```bash $ graph codegen ``` -Una vez que las esquematizaciones están listas, se debe construir el subgrafo. Este paso resaltará cualquier error que puedan tener el manifiesto o las esquematizaciones. Un subgrafo debe construirse correctamente para deployarse en The Graph Node. Se puede hacer usando el comando CLI `build`: +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. 
It can be done using the `build` CLI command: ```bash $ graph build @@ -196,7 +196,7 @@ $ graph build ## Deployando un subgrafo de Cosmos -Una vez que se haya creado su subgrafo, puede implementar su subgrafo usando el comando CLI `graph deployment`: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: **Subgraph Studio** From 0a1a315fbad942ea086f8dbbda2d1f96ac35a002 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:42 -0500 Subject: [PATCH 0587/1534] New translations cosmos.mdx (Arabic) --- website/src/pages/ar/subgraphs/cookbook/cosmos.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/cosmos.mdx b/website/src/pages/ar/subgraphs/cookbook/cosmos.mdx index 64a91ecbc917..29cd28d488a6 100644 --- a/website/src/pages/ar/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/cosmos.mdx @@ -6,7 +6,7 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## What are Cosmos subgraphs? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. There are four types of handlers supported in Cosmos subgraphs: @@ -18,9 +18,9 @@ There are four types of handlers supported in Cosmos subgraphs: Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript Mappings -تمت كتابة المعالجات(handlers) الخاصة بمعالجة الأحداث بـ[ AssemblyScript ](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). 
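For instance, an event handler can read attribute values directly through these types. The sketch below is patterned after the validator-rewards example referenced in this guide; the `Reward` entity and its `validator` and `amount` fields are hypothetical and would need to be defined in the subgraph's `schema.graphql`.

```typescript
import { cosmos } from '@graphprotocol/graph-ts'
// Hypothetical entity; a matching `Reward` type must exist in schema.graphql.
import { Reward } from '../generated/schema'

export function handleReward(data: cosmos.EventData): void {
  let height = data.block.header.height

  // Event attributes are the key/value pairs carried by the Cosmos event.
  let validator = data.event.getAttributeValue('validator')
  let amount = data.event.getAttributeValue('amount')

  let reward = new Reward(height.toString() + '-' + validator)
  reward.validator = validator
  reward.amount = amount
  reward.save()
}
```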
@@ -196,7 +196,7 @@ $ graph build ## Deploying a Cosmos subgraph -بمجرد إنشاء الـ subgraph الخاص بك ، يمكنك نشره باستخدام الأمر `graph deploy`: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: **Subgraph Studio** From b23c1dffeffc96812ce18dabe77e41240da7aa14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:42 -0500 Subject: [PATCH 0588/1534] New translations cosmos.mdx (Czech) --- .../pages/cs/subgraphs/cookbook/cosmos.mdx | 88 +++++++++---------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/cosmos.mdx b/website/src/pages/cs/subgraphs/cookbook/cosmos.mdx index 5fd2162c63ba..0274cc6b5370 100644 --- a/website/src/pages/cs/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/cosmos.mdx @@ -2,26 +2,26 @@ title: Vytváření podgrafů v Cosmos --- -Tato příručka je úvodem k vytváření podgrafů indexujících [Blokové řetězce založené na Cosmos](https://cosmos.network/). +This guide is an introduction on building subgraphs indexing [Cosmos](https://cosmos.network/) based blockchains. ## Co jsou podgrafy Cosmos? -Graph umožňuje vývojářům zpracovávat blockchainové události a výsledná data snadno zpřístupňovat prostřednictvím otevřeného rozhraní GraphQL API, známého jako podgraf. [Graph Node](https://github.com/graphprotocol/graph-node) je nyní schopen zpracovávat události Cosmos, což znamená, že vývojáři Cosmos nyní mohou vytvářet podgrafy pro snadné indexování událostí v řetězci. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. V podgrafech Cosmos jsou podporovány čtyři typy ovladačů: -- **Obsluhy bloků** se spustí vždy, když je do řetězce přidán nový blok. -- **Obsluhy událostí** se spustí při vyslání určité události. -- **Obsluhy transakcí** -- **Obsluhy zpráv** se spustí při výskytu určité zprávy. +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -Založeno na [oficiální dokumentaci k Cosmos](https://docs.cosmos.network/): +Based on the [official Cosmos documentation](https://docs.cosmos.network/): -> [Události](https://docs.cosmos.network/main/learn/advanced/events) jsou objekty, které obsahují informace o provádění aplikace. Používají je především poskytovatelé služeb, jako jsou průzkumníci bloků a peněženky, ke sledování provádění různých zpráv a indexování transakcí. - -> [Transakce](https://docs.cosmos.network/main/learn/advanced/transactions) jsou objekty vytvořené koncovými uživateli za účelem vyvolání změn stavu aplikace. - -> [Zprávy](https://docs.cosmos.network/main/learn/advanced/transactions#messages) jsou objekty specifické pro modul, které vyvolávají stavové přechody v rámci modulu, ke kterému patří. +> [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. 
+> +> [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. +> +> [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Přestože ke všem datům lze přistupovat pomocí blokové obsluhy, jiné obsluhy umožňují vývojářům podgrafů zpracovávat data mnohem podrobnějším způsobem. @@ -29,23 +29,23 @@ Přestože ke všem datům lze přistupovat pomocí blokové obsluhy, jiné obsl ### Závislosti podgrafů -[graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) je nástroj CLI pro sestavování a nasazování podgrafů, pro práci se podgrafy Cosmos je vyžadována verze `>=0.30.0`. +[graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a CLI tool to build and deploy subgraphs, version `>=0.30.0` is required in order to work with Cosmos subgraphs. -[graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) je knihovna typů specifických pro podgrafy, pro práci se podgrafy Cosmos je vyžadována verze `>=0.27.0`. +[graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) is a library of subgraph-specific types, version `>=0.27.0` is required in order to work with Cosmos subgraphs. ### Hlavní součásti subgrafu Při definování podgrafu existují tři klíčové části: -**subgraph.yaml**: soubor YAML obsahující manifest subgrafu, který určuje, které události se mají sledovat a jak je zpracovat. +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**: schéma GraphQL, které definuje, jaká data jsou uložena pro váš podgraf a jak se na ně dotazovat prostřednictvím GraphQL. +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -**AssemblyScript Mapování**: Kód v [AssemblyScript](https://github.com/AssemblyScript/assemblyscript), který překládá data z blockchainu do entit definovaných ve vašem schématu. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### Definice podgrafu Manifest -Manifest podgrafu (`subgraph.yaml`) identifikuje datové zdroje pro podgraf, spouštěče zájmu a funkce (`handlers`), které by měly být spuštěny jako odpověď na tyto spouštěče. Viz níže příklad manifestu podgrafu pro podgraf Cosmos: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 @@ -74,16 +74,16 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Podgrafy Cosmos zavádějí nový `druh` zdroje dat (`cosmos`). -- `síť` by měla odpovídat řetězci v ekosystému Cosmos. V příkladu je použita hlavní síť Cosmos Hub. +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### Definice schématu -Definice schématu popisuje strukturu výsledné databáze podgrafů a vztahy mezi entity. To je nezávislé na původním zdroji dat. 
Podrobnější informace o definici schématu podgrafu naleznete [zde](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript Mapování -Obslužné programy pro zpracování událostí jsou napsány v jazyce [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -165,30 +165,30 @@ class Any { Každý typ obslužné rutiny má svou vlastní datovou strukturu, která se předává jako argument mapovací funkci. -- Obsluhy bloků přijímají typ `Block`. -- Obsluhy událostí přijímají typ `EventData`. -- Zpracovatelé transakcí obdrží typ `TransactionData`. -- Zpracovatelé zpráv přijímají typ `MessageData`. +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. +- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -Jako součást `MessageData` přijímá zpracovatel zpráv kontext transakce, který obsahuje nejdůležitější informace o transakci, která zahrnuje zprávu. Kontext transakce je také k dispozici ve typu `EventData`, ale pouze, když je příslušná událost spojena s transakcí. Kromě toho všichni zpracovatelé obdrží odkaz na blok (`HeaderOnlyBlock`). +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -Úplný seznam typů pro integraci Cosmos najdete [zde](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### Dekódování zpráv -Mějte na paměti, že zprávy Cosmos jsou specifické pro každý řetězec a do podgrafu se doručují jako serializované [zátěže protokolových bufferů](https://protobuf.dev/) užitečné zatížení. Proto musí být data zprávy před zpracováním dekódována v mapovací funkci. +It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -Příklad dekódování dat zprávy v podgrafu naleznete [zde](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Vytvoření a sestavení podgrafu Cosmos -Prvním krokem před začátkem psaní mapovacích funkcí pro podgraf je generovat typové vazby na základě entit definovaných v souboru schématu podgrafu (`schema.graphql`). 
Toto umožní mapovacím funkcím vytvářet nové objekty těchto typů a ukládat je do úložiště. Toho lze dosáhnout pomocí příkazu `codegen` v příkazovém řádku: +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: ```bash $ graph codegen ``` -Jakmile jsou mapovací funkce připraveny, je třeba sestavit podgraf. Tento krok zvýrazní případné chyby v manifestu nebo mapovacích funkcích. Podgraf musí být úspěšně sestaven, aby mohl být nasazen do Graph Node. Toto lze provést pomocí příkazu `build` v příkazovém řádku: +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: ```bash $ graph build @@ -196,9 +196,9 @@ $ graph build ## Nasazení podgrafu Cosmos -Po vytvoření podgrafu můžete podgraf nasadit pomocí příkazu `graph deploy` CLI: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: -**Podgraf Studio** +**Subgraph Studio** Navštivte Studio podgrafů a vytvořte nový podgraf. @@ -206,7 +206,7 @@ Navštivte Studio podgrafů a vytvořte nový podgraf. graph deploy subgraph-name ``` -**Místní uzel grafu (na základě výchozí config):** +**Local Graph Node (based on default configuration):** ```bash graph create subgraph-name --node http://localhost:8020 @@ -218,7 +218,7 @@ graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost ## Dotazování podgrafu Cosmos -Koncový bod GraphQL pro podgrafy Cosmos je určen definicí schématu se stávajícím rozhraním API. Další informace naleznete v [dokumentaci GraphQL API](/subgraphs/querying/graphql-api/). +The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Podporované blockchainy Cosmos @@ -226,11 +226,11 @@ Koncový bod GraphQL pro podgrafy Cosmos je určen definicí schématu se stáva #### Co je Cosmos Hub? -[Cosmos Hub blockchain](https://hub.cosmos.network/) je první blockchain v ekosystému [Cosmos](https://cosmos.network/). Další informace naleznete v [oficiální dokumentaci](https://docs.cosmos.network/). +The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information. #### Sítě -Hlavní síť Cosmos Hub je `cosmoshub-4`. Současná testovací síť Cosmos Hub je `theta-testnet-001`.
    Ostatní sítě Cosmos Hub, jako je `cosmoshub-3`, jsou zastavené, a proto pro ně nejsou poskytována žádná data. +Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
    Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them. ### Osmosis @@ -238,20 +238,20 @@ Hlavní síť Cosmos Hub je `cosmoshub-4`. Současná testovací síť Cosmos Hu #### Co je osmosis? -[Osmosis](https://osmosis.zone/) je decentralizovaný, cross-chain automatizovaný tvůrce trhu (AMM) protokol postavený na Cosmos SDK. Umožňuje uživatelům vytvářet vlastní fondy likvidity a obchodovat s tokeny povolenými IBC. Pro více informací můžete navštívit [oficiální dokumentaci](https://docs.osmosis.zone/). +[Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. #### Sítě -Osmosis mainnet je `osmosis-1`. Aktuální testnet Osmosis je `osmo-test-4`. +Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. ## Příklady podgrafů Zde je několik příkladů podgrafů: -[Příklad blokového filtrování](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) +[Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) -[Příklad odměn validátoru](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) +[Validator Rewards Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) -[Příklad delegování validátoru](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) +[Validator Delegations Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) -[Příklad výměny tokenů Osmosis](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) +[Osmosis Token Swaps Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) From e679b09afdd68a405a6e02695b8091334e8cb7dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:43 -0500 Subject: [PATCH 0589/1534] New translations cosmos.mdx (German) --- .../pages/de/subgraphs/cookbook/cosmos.mdx | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/cosmos.mdx b/website/src/pages/de/subgraphs/cookbook/cosmos.mdx index 8c0cd3b67a2c..d2c95ef10f58 100644 --- a/website/src/pages/de/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/de/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## Was sind Cosmos-Subgrafen? -The Graph ermöglicht es Entwicklern, Blockchain-Ereignisse zu verarbeiten und die resultierenden Daten durch eine offene GraphQL-API, die als Subgraf bezeichnet wird, einfach verfügbar zu machen. [Graph Node](https://github.com/graphprotocol/graph-node) ist jetzt in der Lage, Cosmos-Ereignisse zu verarbeiten, was bedeutet, dass Cosmos-Entwickler jetzt Subgrafen erstellen können, um On-Chain-Ereignisse einfach zu indizieren. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. 
In Cosmos-Subgrafen werden vier Arten von Handlern unterstützt: -- **Block-Handler** werden ausgeführt, wenn ein neuer Block an die Kette angehängt wird. -- **Ereignis-Handler** werden ausgeführt, wenn ein bestimmtes Ereignis ausgegeben wird. -- **Transaktions-Handler** werden ausgeführt, wenn eine Transaktion stattfindet. -- **Message-Handler** werden ausgeführt, wenn eine bestimmte Nachricht auftritt. +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -Basierend auf der [offiziellen Cosmos-Dokumentation](https://docs.cosmos.network/): +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Obwohl man mit einem Block-Handler auf alle Daten zugegriffen werden kann, ermöglichen andere Handler den Subgrafen-Entwicklern, Daten viel detaillierter zu verarbeiten. @@ -37,15 +37,15 @@ Obwohl man mit einem Block-Handler auf alle Daten zugegriffen werden kann, ermö Bei der Definition eines Subgrafen gibt es drei Schlüsselelemente: -**subgraph.yaml**: eine YAML-Datei, die das Subgraf-Manifest enthält, das angibt, welche Ereignisse verfolgt und wie sie verarbeitet werden sollen. +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**: ein GraphQL-Schema, das definiert, welche Daten für Ihren Subgrafen gespeichert werden und wie sie durch GraphQL abgefragt werden. +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript)-Code, der Blockchain-Daten in die definierten Entitäten n Ihrem Schema umsetzt. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### Subgraf-Manifest-Definition -Das Subgraf-Manifest (`subgraph.yaml`) identifiziert die Datenquellen für den Subgraf, die relevanten Trigger und die Funktionen (`handlers`), die als Reaktion auf diese Trigger ausgeführt werden sollen. Unten finden Sie ein Beispiel für ein Subgraf-Manifest für einen Cosmos-Subgrafen: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 @@ -74,7 +74,7 @@ dataSources: file: ./src/mapping.ts # Link zur Datei mit den Assemblyscript-Zuordnungen ``` -- Cosmos-Subgrafen führen eine neue `Art` von Daten-Sourcecode (`cosmos`) ein. 
+- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). - The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### Schema-Definition @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript-Mappings -Die Handler für die Ereignisverarbeitung sind in [AssemblyScript](https://www.assemblyscript.org/) geschrieben. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -165,20 +165,20 @@ class Any { Jeder Handler-Typ verfügt über eine eigene Datenstruktur, die als Argument an eine Zuordnungsfunktion übergeben wird. -- Block-Handler erhalten den Typ `Block`. -- Event-Handler erhalten den Typ `EventData`. -- Transaktionshandler erhalten den Typ `TransactionData`. -- Message-Handler erhalten den Typ `MessageData`. +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. +- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -Als Teil von `MessageData` erhält der Message-Handler einen Transaktionskontext, der die wichtigste Information zu einer Transaktion enthält, die eine Nachricht einschließt. Der Transaktionskontext ist auch im Typ `EventData` verfügbar, aber nur, wenn das entsprechende Ereignis mit einer Transaktion verknüpft ist. Zusätzlich erhalten alle Handler eine Referenz auf einen Block (`HeaderOnlyBlock`). +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -Die vollständige Liste der Typen für die Cosmos-Integration finden Sie [hier](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### Nachrichtendecodierung It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -Ein Beispiel zum Decodieren von Nachrichtendaten in einem Subgrafen finden Sie [hier](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Creating and building a Cosmos subgraph @@ -188,7 +188,7 @@ The first step before starting to write the subgraph mappings is to generate the $ graph codegen ``` -Sobald die Mappings fertig sind, muss der Subgraf erstellt werden. Dieser Schritt hebt alle Fehler hervor, die das Manifest oder die Mappings haben könnten. Ein Subgraf muss erfolgreich erstellt werden, um auf dem Graph-Knoten bereitgestellt zu werden. 
Dies kann mit dem CLI-Befehl `build` erfolgen: +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: ```bash $ graph build From d4593724f6b017ca87cb4e976c9dca45657c9916 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:44 -0500 Subject: [PATCH 0590/1534] New translations cosmos.mdx (Italian) --- website/src/pages/it/subgraphs/cookbook/cosmos.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/cosmos.mdx b/website/src/pages/it/subgraphs/cookbook/cosmos.mdx index dddfbff521d0..5ccf1025ca47 100644 --- a/website/src/pages/it/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/it/subgraphs/cookbook/cosmos.mdx @@ -6,7 +6,7 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## What are Cosmos subgraphs? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. There are four types of handlers supported in Cosmos subgraphs: @@ -18,9 +18,9 @@ There are four types of handlers supported in Cosmos subgraphs: Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. From 57db813565f1286b5472b8074950823ed3136554 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:45 -0500 Subject: [PATCH 0591/1534] New translations cosmos.mdx (Japanese) --- .../pages/ja/subgraphs/cookbook/cosmos.mdx | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/cosmos.mdx b/website/src/pages/ja/subgraphs/cookbook/cosmos.mdx index a2e2be68f0e4..008060749aa6 100644 --- a/website/src/pages/ja/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## Cosmosのサブグラフとは何ですか? 
-Graph は、ブロックチェーンのイベントを処理し、その結果得られたデータを GraphQL API を介して簡単に利用できるようにするためのツールを開発者に提供するもので、個別にはサブグラフとして知られています。[Graph Node](https://github.com/graphprotocol/graph-node)が Cosmosイベントを処理できるようになったということは、Cosmosの開発者がスマートコントラクトの指標となるサブグラフを構築できるようになったということです。 +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. Cosmosのサブグラフでサポートされているハンドラーは4種類あります。 -- **ブロックハンドラー**は、新しいブロックがチェーンに追加されるたびに実行されます。 -- **イベントハンドラー**は、特定のイベントが発生したときに実行されます。 -- **トランザクションハンドラー**は、トランザクションが発生したときに実行されます。 -- **メッセージハンドラー**は、特定のメッセージが発生したときに実行されます。 +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -[Cosmosの公式ドキュメント](https://docs.cosmos.network/)に基づきます。 +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. ブロックハンドラーでは全てのデータにアクセスできますが、その他のハンドラーでは、サブグラフの開発者がよりきめ細かくデータを処理することができます。 @@ -37,15 +37,15 @@ Cosmosのサブグラフでサポートされているハンドラーは4種類 サブグラフの定義には、3つの重要な部分があります。 -**subgraph.yaml**: サブグラフのマニフェストを含むYAMLファイルで、追跡するイベントとその処理方法を特定します。 +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**: サブグラフにどのようなデータが保存されているか、また GraphQL を使ってどのようにクエリを行うかを定義します。 +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -**AssemblyScript Mappings**です。[AssemblyScript](https://github.com/AssemblyScript/assemblyscript) ブロックチェーンデータからスキーマで定義されたエンティティに変換するコードです。 +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### サブグラフマニフェストの定義 -サブグラフ マニフェスト (`subgraph.yaml`) は、サブグラフのデータ ソース、関心のあるトリガー、およびこれらのトリガーに応答して実行される関数 (`handlers`) を特定します。Cosmos サブグラフのサブグラフ マニフェストの例については、以下を参照してください。 +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 @@ -75,7 +75,7 @@ dataSources: ``` - Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). -- `ネットワーク`は、Cosmosエコシステム内のチェーンに対応する必要があります。この例では、Cosmos Hub mainnetが使用されています。 +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. 
### スキーマ定義 @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript マッピング -イベントを処理するためのハンドラは[AssemblyScript](https://www.assemblyscript.org/)で書かれています。 +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -165,30 +165,30 @@ class Any { 各ハンドラタイプは独自のデータ構造を持ち、マッピング関数の引数として渡されます。 -- ブロックハンドラーは、`Block`を受け取ります. -- イベントハンドラーは、`EventData`を受け取ります. -- トランザクションハンドラーは、`TransactionData`を受け取ります. -- メッセージハンドラーは、`MassageData`を受け取ります. +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. +- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -`MessageData` の一部として、メッセージハンドラは、メッセージを包含するトランザクションに関する最も重要な情報を含む、トランザクションコンテキストを受け取ります。トランザクションコンテキストは`EventData`型でも利用できますが、対 応するイベントがトランザクションと関連付けられている場合に限ります。さらに、すべてのハンドラはブロック(`HeaderOnlyBlock`) への参照を受け取ります。 +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -Cosmos統合の全種類一覧は[こちら](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts)で確認できます。 +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### メッセージ・デコーディング It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -サブグラフ内のメッセージ データをデコードする方法の例は、[ここ](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts)にあります。 +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Cosmosサブグラフの作成と構築 -サブグラフ マッピングの記述を開始する前の最初のステップは、サブグラフ スキーマ ファイル (`schema.graphql`) で定義されたエンティティに基づいて型バインディングを生成することです。これにより、マッピング関数がそれらのタイプの新しいオブジェクトを作成し、ストアに保存できるようになります。これは、`codegen` CLI コマンドを使用して行います。 +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: ```bash $ graph codegen ``` -マッピングの準備ができたら、サブグラフをビルドする必要があります。このステップでは、マニフェストまたはマッピングにある可能性のあるエラーがハイライトされます。グラフノードにデプロイするためには、サブグラフを正常にビルドする必要があります。これは `build` CLI コマンドを使用して行うことができます。 +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. 
It can be done using the `build` CLI command: ```bash $ graph build @@ -196,7 +196,7 @@ $ graph build ## Cosmosサブグラフの展開 -サブグラフが作成されたら、CLI コマンドの`graph deploy`を使ってサブグラフをデプロイすることができます。 +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: **Subgraph Studio** From d098ecfc61cf6d43ce30f558bb45e3cc7dc29aa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:46 -0500 Subject: [PATCH 0592/1534] New translations cosmos.mdx (Korean) --- website/src/pages/ko/subgraphs/cookbook/cosmos.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/cosmos.mdx b/website/src/pages/ko/subgraphs/cookbook/cosmos.mdx index dddfbff521d0..5ccf1025ca47 100644 --- a/website/src/pages/ko/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/cosmos.mdx @@ -6,7 +6,7 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## What are Cosmos subgraphs? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. There are four types of handlers supported in Cosmos subgraphs: @@ -18,9 +18,9 @@ There are four types of handlers supported in Cosmos subgraphs: Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. From 8e368174b0715ae31e6f7a84e2c3154715b2bc39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:47 -0500 Subject: [PATCH 0593/1534] New translations cosmos.mdx (Dutch) --- website/src/pages/nl/subgraphs/cookbook/cosmos.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/cosmos.mdx b/website/src/pages/nl/subgraphs/cookbook/cosmos.mdx index dddfbff521d0..5ccf1025ca47 100644 --- a/website/src/pages/nl/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/cosmos.mdx @@ -6,7 +6,7 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## What are Cosmos subgraphs? 
-The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. There are four types of handlers supported in Cosmos subgraphs: @@ -18,9 +18,9 @@ There are four types of handlers supported in Cosmos subgraphs: Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. From 641411fb4571155b68c93f87df66ac8f3649e25b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:48 -0500 Subject: [PATCH 0594/1534] New translations cosmos.mdx (Polish) --- website/src/pages/pl/subgraphs/cookbook/cosmos.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/cosmos.mdx b/website/src/pages/pl/subgraphs/cookbook/cosmos.mdx index dddfbff521d0..5ccf1025ca47 100644 --- a/website/src/pages/pl/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/cosmos.mdx @@ -6,7 +6,7 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## What are Cosmos subgraphs? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. There are four types of handlers supported in Cosmos subgraphs: @@ -18,9 +18,9 @@ There are four types of handlers supported in Cosmos subgraphs: Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. 
They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. From 1ad202d206e9fb04927f493bcb3125e3eb29c9cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:49 -0500 Subject: [PATCH 0595/1534] New translations cosmos.mdx (Portuguese) --- .../pages/pt/subgraphs/cookbook/cosmos.mdx | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/cosmos.mdx b/website/src/pages/pt/subgraphs/cookbook/cosmos.mdx index 7a38cef0206a..106301863698 100644 --- a/website/src/pages/pt/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/cosmos.mdx @@ -10,18 +10,18 @@ The Graph permite que os programadores processem eventos em blockchain e façam Existem quatro categorias de handlers apoiados em subgraphs no Cosmos: -- **Block handlers** são executados quando um novo bloco é anexado à chain. -- **Event handlers** são executados quando um evento específico é emitido. -- **Transaction handlers** são executados quando uma transação ocorre. -- **Message handlers** são executados quando uma mensagem específica ocorre. +- **Handlers de bloco** são executados quando um novo bloco é anexado à chain. +- **Handlers de evento** são executados quando um evento específico é emitido. +- **Handlers de transação** são executados quando ocorre uma transação. +- **Handlers de mensagem** são executados quando ocorre uma mensagem específica. -Baseado na [documentação oficial do Cosmos](https://docs.cosmos.network/): +Com base na [documentação oficial do Cosmos](https://docs.cosmos.network/): > [Eventos](https://docs.cosmos.network/main/learn/advanced/events) são objetos que contém informação sobre a execução do aplicativo. São principalmente usados por provedores de serviço, como exploradores de blocos e carteiras, para rastrear a execução de várias mensagens e transações de índices. - -> [Transações](https://docs.cosmos.network/main/learn/advanced/transactions) são objetos criados por utilizadores finais para realizar mudanças de estado no aplicativo. - -> [Mensagens](https://docs.cosmos.network/main/learn/advanced/transactions#messages) são objetos específicos a módulos, que realizam transições de estado no escopo do módulo a qual pertencem. +> +> [Transações](https://docs.cosmos.network/main/learn/advanced/transactions) são objetos criados por utilizadores finais para ativar mudanças de estado no aplicativo. +> +> [Mensagens](https://docs.cosmos.network/main/learn/advanced/transactions#messages) são objetos específicos a um módulo que ativam transições de estado, no alcance desse módulo. Apesar de todos os dados poderem ser acessados com um block handler, outros handlers permitem a programadores de subgraphs processar dados de uma maneira muito mais granular. 
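To make that granularity concrete, here is a minimal AssemblyScript sketch contrasting a block handler with an event handler, assuming the `cosmos` types from `@graphprotocol/graph-ts`; the `RewardEvent` entity and both handler names are illustrative placeholders, not taken from the official examples:

```ts
import { cosmos } from "@graphprotocol/graph-ts";
// Hypothetical entity class that `graph codegen` would emit from schema.graphql.
import { RewardEvent } from "../generated/schema";

// A block handler fires for every block, so any filtering happens in code.
export function handleBlock(block: cosmos.Block): void {
  // Inspect block.header and the rest of the payload here.
}

// An event handler fires only for the eventType declared in subgraph.yaml,
// so the incoming data is already narrowed to the events of interest.
export function handleReward(data: cosmos.EventData): void {
  let id = data.block.header.height.toString() + "-" + data.event.eventType;
  let reward = new RewardEvent(id);
  reward.save();
}
```

The event handler is the more granular option because Graph Node, not the mapping code, does the filtering.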
@@ -29,23 +29,23 @@ Apesar de todos os dados poderem ser acessados com um block handler, outros hand ### Dependências de Subgraph -O [graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) é uma ferramenta de CLI para construir e lançar subgraphs. A versão `>=0.30.0` é necessária para trabalhar com subgraphs no Cosmos. +[graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) é uma ferramenta de CLI para construir e lançar subgraphs. A versão `>=0.30.0` é necessária para funcionar com subgraphs do Cosmos. -O [graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) é uma biblioteca de tipos específicos a subgraphs; a versão `>=0.27.0` é necessária para trabalhar com subgraphs no Cosmos. +[graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) é uma biblioteca de tipos de dados específicos a um subgraph. Subgraphs do Cosmos exigem a versão `>=0.27.0`. ### Componentes Principais de Subgraph Quando definir um subgraph, estes três componentes serão muito importantes: -**subgraph.yaml**: um arquivo YAML que contém o manifest do subgraph, além de identificar quais eventos rastrear e como processá-los. +**subgraph.yaml:** um arquivo YAML que contém o manifest do subgraph, e identifica quais eventos rastrear e como processá-los. -**schema.graphql**: um schema do GraphQL que define quais dados serão armazenados para o seu subgraph, e como consultá-los via GraphQL. +**schema.graphql:** um schema da GraphQL que define quais dados são armazenados para o seu subgraph, e como consultá-los em query via GraphQL. -**Mapeamentos de AssemblyScript**: códigos em [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) que traduzem dos dados da blockchain às entidades definidas no seu schema. +**Mapeamentos do AssemblyScript**: Código do [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) que traduz de dados da blockchain para as entidades definidas no seu schema. ### Definição de Manifest de Subgraph -O manifest do subgraph (`subgraph.yaml`) identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções (`handlers`) que devem ser executadas em resposta àqueles gatilhos. Veja abaixo um exemplo de um manifest de subgraph para um subgraph no Cosmos: +O manifest do subgraph (`subgraph.yaml`) identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções (`handlers`) a executar em resposta àqueles gatilhos. Veja abaixo um exemplo de um manifest para um subgraph no Cosmos: ```yaml specVersion: 0.0.5 @@ -74,18 +74,18 @@ dataSources: file: ./src/mapping.ts # link ao arquivo com os mapeamentos em AssemblyScript ``` -- Subgraphs no Cosmos introduzem um novo `tipo` de fonte de dados (`cosmos`). +- Subgraphs no Cosmos introduzem um novo tipo de fonte de dados (`cosmos`). - A rede (`network`) deve corresponder a uma chain no ecossistema Cosmos. O exemplo acima usa a mainnet do Cosmos Hub. ### Definição de Schema -A definição do schema descreve a estrutura do banco de dados do subgraph resultante e os relacionamentos entre entidades. Isto é agnóstico à fonte de dados original. Mais detalhes na definição do schema de subgraph [aqui](/developing/creating-a-subgraph/#the-graphql-schema). +A definição de Schema descreve a estrutura do banco de dados resultado do subgraph, e os relacionamentos entre entidades. Isto é agnóstico da fonte de dados original. 
Para mais detalhes na definição de schema de subgraph, [clique aqui](/developing/creating-a-subgraph/#the-graphql-schema). ### Mapeamentos em AssemblyScript -Os handlers para o processamento de eventos são escritos em [AssemblyScript](https://www.assemblyscript.org/). +Os handlers para processamento de eventos estão escritos em [AssemblyScript](https://www.assemblyscript.org/). -Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +A indexação do Cosmos introduz tipos de dados específicos para esse ecossistema à [API do AssemblyScript](/subgraphs/developing/creating/graph-ts/api/). ```tsx class Block { @@ -172,23 +172,23 @@ Cada tipo de handler vem com a sua própria estrutura de dados, a ser passada co Como parte do `MessageData`, o handler de mensagens recebe um contexto de transação, que contém as informações mais importantes sobre uma transação a compor uma mensagem. O contexto da transação também está disponível no tipo `EventData`, mas só quando o evento correspondente é associado a uma transação. Além disso, todos os handlers recebem uma referência a um bloco (`HeaderOnlyBlock`). -A lista completa de tipos para integração ao Cosmos está [aqui](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +Veja a lista completa de tipos para integração do Cosmos [aqui](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### Descodificação de mensagens -Repare que mensagens no Cosmos são específicas a chains, e são passadas a um subgraph na forma de um payload serializado de [Buffers de Protocolo](https://protobuf.dev/). Portanto, os dados das mensagens devem ser decodificados em uma função de mapeamento antes que possam ser processados. +Repare que mensagens no Cosmos são específicas a chains, e são passadas a um subgraph na forma de um payload serializado de [Buffers de Protocolo](https://protobuf.dev/). Portanto, os dados das mensagens devem ser decodificados numa função de mapeamento antes que possam ser processados. -Mais informações sobre como descodificar dados de mensagem em um subgraph [aqui](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +Veja um exemplo de como decodificar dados de mensagem em um subgraph [aqui](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Criação e construção de um subgraph no Cosmos -Antes de começar a escrever os mapeamentos do subgraph, o primeiro passo é gerar os vínculos baseados nas entidades definidas no arquivo de schema do subgraph (`schema.graphql`). Assim, as funções de mapeamento criarão novos objetos destes tipos e os salvarão ao armazenamento. Isto é feito usando o seguinte comando CLI `codegen`: +Antes de começar a escrever os mapeamentos do subgraph, o primeiro passo é gerar os vínculos baseados nas entidades definidas no arquivo de schema do subgraph (`schema.graphql`). Assim, as funções de mapeamento criarão novos objetos destes tipos e os salvarão ao armazenamento. Isto é feito com o seguinte comando de CLI `codegen`: ```bash $ graph codegen ``` -Quando os mapeamentos estiverem prontos, o subgraph precisará ser construído. Este passo destacará quaisquer erros possíveis no manifest ou nos mapeamentos. Um subgraph deve ser construído com êxito para ser lançado ao Graph Node. 
Isto é possível com o seguinte comando CLI `build`:
+Quando os mapeamentos estiverem prontos, o subgraph precisará ser construído. Este passo destacará quaisquer erros possíveis no manifest ou nos mapeamentos. Um subgraph deve ser construído com êxito para ser implantado no Graph Node. Isto é possível com o seguinte comando de CLI `build`:

```bash
$ graph build
@@ -218,7 +218,7 @@ graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost

## Consulta a um subgraph no Cosmos

-O endpoint do GraphQL para subgraphs no Cosmos é determinado pela definição do schema, com a interface existente da API. Mais informações na [documentação da API GraphQL](/subgraphs/querying/graphql-api/).
+O ponto final do GraphQL para subgraphs no Cosmos é determinado pela definição do schema, com a interface existente da API. Visite a [documentação da API da GraphQL](/subgraphs/querying/graphql-api/) para mais informações.

## Apoio a Blockchains no Cosmos

@@ -226,7 +226,7 @@ O endpoint do GraphQL para subgraphs no Cosmos é determinado pela definição d

#### O que é Cosmos Hub?

-A [Cosmos Hub](https://hub.cosmos.network/) é a primeira blockchain no ecossistema [Cosmos](https://cosmos.network/). Para saber mais, leia a [documentação oficial](https://docs.cosmos.network/).
+A [blockchain Cosmos Hub](https://hub.cosmos.network/) é a primeira blockchain no ecossistema [Cosmos](https://cosmos.network/). Para mais informações, veja a [documentação oficial](https://docs.cosmos.network/).

#### Redes

@@ -238,7 +238,7 @@ A mainnet atual do Cosmos Hub é `cosmoshub-4`, e a testnet atual do Cosmos Hub

#### O que é Osmosis?

-[Osmosis](https://osmosis.zone/) é um protocolo criador automático de mercado (AMM), descentralizado e cross-chain, construído em cima do SDK do Cosmos. Ele permite que os utilizadores criem pools customizados de liquidez e troquem tokens IBC. Para mais informações, visite a [documentação oficial](https://docs.osmosis.zone/).
+O [Osmosis](https://osmosis.zone/) é um protocolo AMM (formador de mercado automatizado) cross-chain, construído sobre o SDK do Cosmos. Este permite que os utilizadores criem pools customizadas de liquidez e troquem tokens com apoio a protocolos IBC (comunicação inter-blockchain). Para mais informações, veja a [documentação oficial](https://docs.osmosis.zone/).
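As a hedged illustration of the querying section above: a deployed Cosmos subgraph answers ordinary GraphQL over HTTP, so a minimal TypeScript client could look like the sketch below. The endpoint URL follows the default local Graph Node layout, and the `rewardEvents` field is a placeholder for whatever the schema actually defines:

```ts
// Placeholder endpoint of a subgraph deployed to a local Graph Node.
const endpoint = "http://localhost:8000/subgraphs/name/subgraph-name";

async function queryExample(): Promise<void> {
  // POST a GraphQL document; requires a fetch-capable runtime (Node 18+ or a browser).
  const response = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query: "{ rewardEvents(first: 5) { id } }" }),
  });
  const result = await response.json();
  console.log(result.data);
}

queryExample();
```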
#### Redes @@ -248,7 +248,7 @@ A mainnet do Osmosis é `osmosis-1`, e a testnet atual do Osmosis é `osmo-test- Aqui estão alguns exemplos de subgraphs para referência: -[Exemplo de Filtragem de Blocos](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) +[Exemplo de Filtração de Blocos](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) [Exemplo de Recompensas de Validador](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) From b0980c5b5af455c520274c177f830b7f61df4278 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:50 -0500 Subject: [PATCH 0596/1534] New translations cosmos.mdx (Russian) --- .../pages/ru/subgraphs/cookbook/cosmos.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/cosmos.mdx b/website/src/pages/ru/subgraphs/cookbook/cosmos.mdx index ef567af1ddde..cfa4b94b970f 100644 --- a/website/src/pages/ru/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/cosmos.mdx @@ -2,26 +2,26 @@ title: Создание субграфов на Cosmos --- -Это руководство представляет собой введение в создание субграфов, индексирующих блокчейны на основе [Cosmos](https://cosmos.network/). +Это руководство служит введением в создание субграфов для индексирования блокчейнов, основанных на экосистеме [Cosmos](https://cosmos.network/). ## Что такое субграфы на Cosmos? -The Graph позволяет разработчикам обрабатывать события блокчейна и делать полученные данные легко доступными через открытый API GraphQL, известный как субграф. [Graph Node](https://github.com/graphprotocol/graph-node) теперь может обрабатывать события Cosmos, а это означает, что разработчики Cosmos теперь могут создавать субграфы для удобного индексирования событий в сети. +The Graph позволяет разработчикам обрабатывать события блокчейна и предоставлять полученные данные через открытый GraphQL API, называемый субграфом. [Graph Node](https://github.com/graphprotocol/graph-node) теперь способен обрабатывать события Cosmos, что означает, что разработчики Cosmos могут создавать субграфы для удобного индексирования событий на чейне. Существует четыре типа обработчиков, поддерживаемых в субграфах Cosmos: -- **Обработчики блоков** запускаются всякий раз, когда к чейну добавляется новый блок. -- **Обработчики событий** запускаются при возникновении определенного события. -- **Обработчики транзакций** запускаются, когда происходит транзакция. -- ** Обработчики сообщений** запускаются при появлении определенного сообщения. +- **Обработчики блоков** запускаются каждый раз, когда новый блок добавляется в чейн. +- **Обработчики событий** запускаются, когда происходит определённое событие. +- **Обработчики транзакций** запускаются при выполнении транзакции. +- **Обработчики сообщений** запускаются при появлении определённого сообщения. На основе [официальной документации Cosmos](https://docs.cosmos.network/): -> [События](https://docs.cosmos.network/main/learn/advanced/events) — это объекты, содержащие информацию о выполнении приложения. В основном они используются поставщиками услуг, такими как обозреватели блоков и кошельки, для отслеживания выполнения различных сообщений и индексных транзакций. - -> [Транзакции](https://docs.cosmos.network/main/learn/advanced/transactions) — это объекты, созданные конечными пользователями для инициирования изменений состояния приложения. 
- -> [Сообщения](https://docs.cosmos.network/main/learn/advanced/transactions#messages) — это объекты, специфичные для модуля, которые запускают переходы состояний в пределах модуля, которому они принадлежат. +> [События](https://docs.cosmos.network/main/learn/advanced/events) — это объекты, содержащие информацию о выполнении приложения. Они используются в основном сервисами, такими как сервисы для отслеживания блоков и кошельки, для отслеживания выполнения различных сообщений и индексирования транзакций. +> +> [Транзакции](https://docs.cosmos.network/main/learn/advanced/transactions) — это объекты, создаваемые конечными пользователями для инициирования изменений состояния в приложении. +> +> [Сообщения](https://docs.cosmos.network/main/learn/advanced/transactions#messages) — это специфичные для модулей объекты, которые инициируют переходы состояния в рамках модуля, к которому они относятся. Хотя доступ ко всем данным можно получить с помощью обработчика блоков, другие обработчики позволяют разработчикам субграфов обрабатывать данные гораздо более детально. @@ -29,9 +29,9 @@ The Graph позволяет разработчикам обрабатывать ### Зависимости субграфа -[graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) — это инструмент CLI для создания и развертывания субграфов. Для работы с субграфами Cosmos требуется версия `>=0.30.0`. +[graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) — это инструмент командной строки для создания и развертывания субграфов. Для работы с субграфами Cosmos требуется версия `>=0.30.0`. -[graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) — это библиотека типов, специфичных для субграфов. Для работы с субграфами Cosmos требуется версия `>=0.27.0`. +[graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) — это библиотека типов, специфичных для субграфов, для работы с субграфами Cosmos требуется версия `>=0.27.0`. ### Основные компоненты субграфа @@ -39,13 +39,13 @@ The Graph позволяет разработчикам обрабатывать **subgraph.yaml**: файл YAML, содержащий манифест субграфа, который определяет, какие события отслеживать и как их обрабатывать. -**schema.graphql**: схема GraphQL, которая определяет, какие данные хранятся для Вашего субграфа и как их запрашивать через GraphQL. +**schema.graphql**: схема GraphQL, которая определяет, какие данные хранятся для Вашего субграфа и как их можно запрашивать через GraphQL. -**AssemblyScript Mappings**: код [AssemblyScript](https://github.com/AssemblyScript/assemblyscript), преобразующий данные блокчейна в определенные объекты в Вашей схеме. +**Мэппинги на AssemblyScript**: код на [AssemblyScript](https://github.com/AssemblyScript/assemblyscript), который преобразует данные блокчейна в объекты, определенные в Вашей схеме. ### Определение манифеста субграфа -Манифест субграфа (`subgraph.yaml`) определяет источники данных для субграфа, релевантные триггеры и функции (`handlers`), которые должны выполняться в ответ на эти триггеры. Ниже приведен пример манифеста субграфа для субграфа на Cosmos: +Манифест субграфа (`subgraph.yaml`) идентифицирует источники данных для субграфа, триггеры, представляющие интерес, и функции (`handlers`), которые должны быть выполнены в ответ на эти триггеры. 
Ниже приведен пример манифеста субграфа для субграфа Cosmos: ```yaml specVersion: 0.0.5 @@ -74,18 +74,18 @@ dataSources: file: ./src/mapping.ts # ссылка на мэппинг-файл скрипта сборки ``` -- Субграфы на Cosmos представляют новый `вид` источника данных (`cosmos`). -- `network` должен соответствовать чейну в экосистеме Cosmos. В примере используется майннет Cosmos Hub. +- Субграфы Cosmos вводят новый `kind` (тип) источника данных (`cosmos`). +- `network` (сеть) должна соответствовать чейну в экосистеме Cosmos. В этом примере используется майннет Cosmos Hub. ### Определение схемы -Определение схемы описывает структуру результирующей базы данных субграфа и взаимосвязи между объектами. Это не зависит от исходного источника данных. Более подробная информация об определении схемы субграфа приведена [здесь](/developing/creating-a-subgraph/#the-graphql-schema). +Определение схемы описывает структуру итоговой базы данных субграфа и отношения между объектами. Это не зависит от исходного источника данных. Более подробную информацию об определении схемы субграфа можно найти [здесь](/developing/creating-a-subgraph/#the-graphql-schema). ### Мэппинги AssemblyScript -Обработчики событий написаны на языке [AssemblyScript](https://www.assemblyscript.org/). +Обработчики для обработки событий написаны на [AssemblyScript](https://www.assemblyscript.org/). -Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +Индексирование Cosmos вводит специфичные для Cosmos типы данных в [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). ```tsx class Block { @@ -170,25 +170,25 @@ class Any { - Обработчики транзакций получают тип `TransactionData`. - Обработчики сообщений получают тип `MessageData`. -Как часть `MessageData` обработчик сообщения получает контекст транзакции, который содержит наиболее важную информацию о транзакции, включающей сообщение. Контекст транзакции также доступен в типе `EventData`, но только, когда соответствующее событие связано с транзакцией. Кроме того, все обработчики получают ссылку на блок (`HeaderOnlyBlock`). +Как часть `MessageData`, обработчик сообщений получает контекст транзакции, который содержит наиболее важную информацию о транзакции, в рамках которой было выполнено сообщение. Контекст транзакции также доступен в типе `EventData`, но только когда соответствующее событие связано с транзакцией. Кроме того, все обработчики получают ссылку на блок (`HeaderOnlyBlock`). -Полный список типов для интеграции Cosmos можно найти [здесь](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +Полный список типов для интеграции с Cosmos можно найти [здесь](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### Расшифровка сообщений -Важно отметить, что сообщения Cosmos специфичны для каждой чейна и передаются в субграф в виде сериализованной нагрузки [Protocol Buffers](https://protobuf.dev/). В результате данные сообщения должны быть декодированы в функции мэппинга перед их обработкой. +Важно отметить, что сообщения Cosmos специфичны для каждого чейна и передаются в субграф в виде сериализованного полезного груза [Protocol Buffers](https://protobuf.dev/). В результате, данные сообщения необходимо декодировать в функции мэппинга, прежде чем они смогут быть обработаны. 
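A minimal sketch of that decoding step, assuming AssemblyScript message types generated from the chain's `.proto` definitions with a tool such as `as-proto`; the `MsgDelegate` import path is illustrative:

```ts
import { Protobuf } from "as-proto";
// Hypothetical bindings generated from the chain's .proto files.
import { MsgDelegate } from "./pb/cosmos.staking.v1beta1/MsgDelegate";

// A Cosmos message reaches the mapping as raw Protocol Buffers bytes
// (msg.value), so it must be decoded before its fields can be read.
export function decodeMsgDelegate(msgValue: Uint8Array): MsgDelegate {
  return Protobuf.decode<MsgDelegate>(msgValue, MsgDelegate.decode);
}
```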
-Пример того, как расшифровываются данные сообщения в субграфе, можно найти [здесь](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +Пример того, как декодировать данные сообщений в субграфе, можно найти [здесь](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Создание и построение субграфа на Cosmos -Первым шагом перед началом написания мэппингов субграфов является создание привязок типов на основе объектов, определенных в файле схемы субграфа (`schema.graphql`). Это позволит функциям мэппинга создавать новые объекты этих типов и сохранять их в хранилище. Для этого используется команда CLI `codegen`: +Первым шагом перед написанием мэппингов субграфа является генерация привязок типов на основе объектов, определённых в файле схемы субграфа (`schema.graphql`). Это позволит функциям мэппинга создавать новые объекты этих типов и сохранять их в хранилище. Это делается с помощью команды CLI `codegen`: ```bash $ graph codegen ``` -После того, как мэппинги готовы, необходимо построить субграф. На этом шаге будут выделены все ошибки, которые могут быть в манифесте или мэппингах. Субграф должен быть успешно построен, чтобы его можно было развернуть на Graph Node. Это можно сделать с помощью команды `build` командной строки: +Как только мэппинги будут готовы, нужно построить субграф. Этот шаг выявит все ошибки, которые могут быть в манифесте или мэппингах. Субграф должен успешно построиться, чтобы его можно было развернуть на Graph Node. Это делается с помощью команды CLI `build`: ```bash $ graph build @@ -196,7 +196,7 @@ $ graph build ## Развертывание субграфа на Cosmos -Как только Ваш субграф создан, Вы можете развернуть его с помощью команды `graph deploy` CLI: +После того как субграф создан, его можно развернуть с помощью команды `graph deploy` в CLI: **Subgraph Studio** @@ -206,7 +206,7 @@ $ graph build graph deploy subgraph-name ``` -**Локальная Graph Node (на основе конфигурации по умолчанию):** +**Локальный Graph Node (на основе конфигурации по умолчанию):** ```bash graph create subgraph-name --node http://localhost:8020 @@ -218,7 +218,7 @@ graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost ## Запрос субграфа на Cosmos -Конечная точка GraphQL для субграфов на Cosmos устанавливается определением схемы с помощью существующего интерфейса API. Дополнительную информацию можно найти в [документации GraphQL API](/subgraphs/querying/graphql-api/). +Конечная точка GraphQL для субграфов Cosmos определяется схемой, с использованием существующего интерфейса API. Пожалуйста, изучите [документацию по GraphQL API](/subgraphs/querying/graphql-api/) для получения дополнительной информации. ## Поддерживаемые блокчейны Cosmos @@ -226,11 +226,11 @@ graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost #### Что такое Cosmos Hub? -[Блокчейн Cosmos Hub](https://hub.cosmos.network/) — первый блокчейн в экосистеме [Cosmos](https://cosmos.network/). Дополнительную информацию можно найти в [официальной документации](https://docs.cosmos.network/). +[Блокчейн Cosmos Hub](https://hub.cosmos.network/) — это первый блокчейн в экосистеме [Cosmos](https://cosmos.network/). Для получения дополнительной информации Вы можете ознакомиться с [официальной документацией](https://docs.cosmos.network/). #### Сети -Основная сеть Cosmoshub — `cosmoshub-4`. Текущая тестовая сеть Cosmos Hub — `theta-testnet-001`.
    Другие сети Cosmos Hub, например, `cosmoshub-3`, остановлены, поэтому данные для них не предоставляются. +Майннет Cosmos Hub называется `cosmoshub-4`. Текущий тестнет Cosmos Hub — `theta-testnet-001`.
    Другие сети Cosmos Hub, такие как `cosmoshub-3`, приостановлены, поэтому данные для них не предоставляются. ### Osmosis @@ -238,20 +238,20 @@ graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost #### Что такое Osmosis? -[Osmosis](https://osmosis.zone/) – это децентрализованный межсетевой протокол автоматизированного маркет-мейкера (AMM), построенный на основе Cosmos SDK. Он позволяет пользователям создавать собственные пулы ликвидности и торговать токенами с поддержкой IBC. Дополнительную информацию можно найти в [официальной документации](https://docs.osmosis.zone/). +[Osmosis](https://osmosis.zone/) — это децентрализованный протокол автоматизированного маркет-мейкера (AMM), построенный на основе Cosmos SDK. Он позволяет пользователям создавать собственные пулы ликвидности и торговать токенами, поддерживающими IBC. Для получения дополнительной информации, Вы можете ознакомиться с [официальной документацией](https://docs.osmosis.zone/). #### Сети -Майннет Osmosis — `osmosis-1`. Текущая тестовая сеть Osmosis — `osmo-test-4`. +Майннет Osmosis — это `osmosis-1`. Текущий тестнет Osmosis — это `osmo-test-4`. ## Примеры субграфов Вот несколько примеров субграфов для справки: -[Пример блочной фильтрации](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) +[Пример фильтрации блоков](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) -[Пример вознаграждения валидатора](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) +[Пример вознаграждений валидатора](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) -[Пример делегирования валидатору](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) +[Пример делегирования валидатора](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) -[Пример свопа токенов на Osmosis](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) +[Пример обмена токенов Osmosis](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) From 240260ccc3a8d2acce1dae83aa73bfda9e19388b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:51 -0500 Subject: [PATCH 0597/1534] New translations cosmos.mdx (Swedish) --- .../pages/sv/subgraphs/cookbook/cosmos.mdx | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/cosmos.mdx b/website/src/pages/sv/subgraphs/cookbook/cosmos.mdx index 3c806079a105..75e5475585f3 100644 --- a/website/src/pages/sv/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## Vad är Cosmos subgrafer? -Graph tillåter utvecklare att bearbeta blockchain-händelser och göra den resulterande informationen lätt tillgänglig via en öppen GraphQL API, känd som en subgraf. [Graph Node](https://github.com/graphprotocol/graph-node) kan nu bearbeta Cosmos-händelser, vilket innebär att Cosmos-utvecklare nu kan bygga subgrafer för att enkelt indexera händelser i kedjan. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. Det finns fyra typer av hanterare som stöds i Cosmos subgrafer: -- ** Blockhanterare** körs när ett nytt block läggs till i kedjan. -- **Händelsehanterare** körs när en specifik händelse sänds ut. -- **Transaktionshanterare** körs när en transaktion inträffar. -- **Meddelandehanterare** körs när ett specifikt meddelande visas. +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -Baserat på den [officiella Cosmos-dokumentationen](https://docs.cosmos.network/): +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Även om all data kan nås med en blockhanterare, gör andra hanterare det möjligt för subgraf utvecklare att behandla data på ett mycket mer detaljerat sätt. @@ -37,15 +37,15 @@ Baserat på den [officiella Cosmos-dokumentationen](https://docs.cosmos.network/ Det finns tre viktiga delar när det gäller att definiera en subgraf: -**subgraph.yaml**: en YAML fil som innehåller subgraf manifestet, som identifierar vilka händelser som ska spåras och hur de ska behandlas. +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**: ett GraphQL schema som definierar vilken data som lagras för din subgraf och hur du frågar efter den via GraphQL. +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -**AssemblyScript mappningar**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript)-kod som översätter från blockchain data till de definierade enheterna i ditt schema. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### Definition av subgraf manifestet -Subgrafmanifestet (`subgraph.yaml`) identifierar datakällorna för subgrafen, utlösare av intresse och funktionerna (`hanterare`) som ska köras som svar på dessa utlösare. Se nedan för ett exempel på subgraf manifest för en Cosmos subgraf: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 @@ -74,8 +74,8 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Cosmos subgrafer introducerar en ny `typ` av datakälla (`cosmos`). 
-- `Nätverket` bör motsvara en kedja i Cosmos ekosystem. I exemplet används Cosmos Hub huvudnät. +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### Schema Definition @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript mappningar -Hanterarna för bearbetning av händelser är skrivna i [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -170,25 +170,25 @@ Varje hanterartyp kommer med sin egen datastruktur som skickas som ett argument - Transaction handlers receive the `TransactionData` type. - Message handlers receive the `MessageData` type. -Som en del av `MessageData` får meddelandehanteraren en transaktionskontext, som innehåller den viktigaste informationen om en transaktion som omfattar ett meddelande. Transaktionskontexten är också tillgänglig i typen `EventData`, men bara när motsvarande händelse är associerad med en transaktion. Dessutom får alla hanterare en referens till ett block (`HeaderOnlyBlock`). +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -Du hittar hela listan över typer för Cosmos integrationen [här](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### Meddelan avkodning It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -Ett exempel på hur man avkodar meddelandedata i en subgraf finns [här](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Skapa och bygga en Cosmos subgraf -Det första steget innan du börjar skriva subgrafmappningarna är att generera typbindningarna baserat på de entiteter som har definierats i subgrafschemafilen (`schema.graphql`). Detta gör det möjligt för mappningsfunktionerna att skapa nya objekt av den typen och spara dem i butiken. Detta görs genom att använda `codegen` CLI-kommandot: +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. 
This is done by using the `codegen` CLI command: ```bash $ graph codegen ``` -När mappningarna är klara måste subgrafen byggas. Det här steget kommer att markera eventuella fel som manifestet eller mappningarna kan ha. En subgraf måste byggas framgångsrikt för att kunna distribueras till Graph Node. Det kan göras med kommandot `build` CLI: +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: ```bash $ graph build @@ -196,9 +196,9 @@ $ graph build ## Distribuera en Cosmos subgraf -När din subgraf har skapats kan du distribuera din subgraf genom att använda `graph deploy` CLI kommandot: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: -**Subgraf Studion** +**Subgraph Studio** Visit the Subgraph Studio to create a new subgraph. From 3a45116dbd0d6ba7c09f8466b84148c16fa5d6e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:52 -0500 Subject: [PATCH 0598/1534] New translations cosmos.mdx (Turkish) --- .../pages/tr/subgraphs/cookbook/cosmos.mdx | 96 +++++++++---------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/cosmos.mdx b/website/src/pages/tr/subgraphs/cookbook/cosmos.mdx index 3cc5dc639e63..507bc7acb87d 100644 --- a/website/src/pages/tr/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/cosmos.mdx @@ -2,26 +2,26 @@ title: Cosmos'ta Subgraph'ler Oluşturma --- -This guide is an introduction on building subgraphs indexing [Cosmos](https://cosmos.network/) based blockchains. +Bu rehber, [Cosmos](https://cosmos.network/) tabanlı blokzincirleri endeksleyen subgraph'ler oluşturma konusunda bir giriş niteliğindedir. ## Cosmos subgraph'leri nelerdir? -Graph, geliştiricilerin blockchain etkinliklerini işlemesine ve ortaya çıkan verileri subgraph olarak bilinen açık bir GraphQL API aracılığıyla kolayca kullanılabilir hale getirmesine olanak tanır. [Graph Düğümü](https://github.com/graphprotocol/graph-node) artık Cosmos etkinliklerini işleyebilir, bu da Cosmos geliştiricilerinin artık zincir üstü olayları kolayca dizine eklemek için subgraph'ler oluşturabileceği anlamına gelir. +The Graph, geliştiricilerin blokzinciri olaylarını işlemesine ve sonuçta ortaya çıkan veriyi açık bir GraphQL API'ı (subgraph olarak bilinir) üzerinden kolayca erişilebilir hale getirmesine olanak tanır. [Graph Düğümü](https://github.com/graphprotocol/graph-node), artık Cosmos olaylarını işleyebiliyor. Bu da Cosmos geliştiricilerinin zincir üzerindeki olayları kolayca endekslemek için subgraph'ler oluşturabileceği anlamına geliyor. Cosmos subgraph'lerinde desteklenen dört tür işleyici vardır: -- **Blok işleyicileri**, zincire her yeni blok eklendiğinde çalışır. -- **Olay işleyicileri**, belirli bir olay yayınlandığında çalışır. -- **İşlem işleyicileri**, bir işlem gerçekleştiğinde çalışır. -- **Mesaj işleyicileri**, belirli bir mesaj oluştuğunda çalışır. +- **Blok işleyicileri** zincire yeni bir blok eklendiğinde çalışır. +- **Olay işleyicileri** belirli bir olay yayıldığında çalışır. +- **İşlem işleyicileri** bir işlem gerçekleştiğinde çalışır. +- **Mesaj işleyicileri** belirli bir mesaj oluştuğunda çalışır. 
-[resmi Cosmos belgelerine](https://docs.cosmos.network/) göre: +[Cosmos'un resmi dokümantasyonuna](https://docs.cosmos.network/) dayalı olarak: -> [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - -> [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - -> [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. +> [Olaylar](https://docs.cosmos.network/main/learn/advanced/events), uygulamanın yürütülmesi hakkında bilgi içeren nesnelerdir. Genellikle blok gezginleri ve cüzdanlar gibi hizmet sağlayıcılar tarafından çeşitli mesajların yürütülmesini izlemek ve işlemleri endekslemek için kullanılırlar. +> +> [İşlemler](https://docs.cosmos.network/main/learn/advanced/transactions), uygulamada durum değişikliklerini tetiklemek için son kullanıcılar tarafından oluşturulan nesnelerdir. +> +> [Mesajlar](https://docs.cosmos.network/main/learn/advanced/transactions#messages), ait oldukları modül kapsamındaki durum geçişlerini tetikleyen modüle özel nesnelerdir. Tüm verilere bir blok işleyici ile erişilebilmesine rağmen, diğer işleyiciler, subgraph geliştiricilerin verileri çok daha ayrıntılı bir şekilde işlemesine olanak tanır. @@ -29,23 +29,23 @@ Tüm verilere bir blok işleyici ile erişilebilmesine rağmen, diğer işleyici ### Subgraph Gereksinimleri -[graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a CLI tool to build and deploy subgraphs, version `>=0.30.0` is required in order to work with Cosmos subgraphs. +[graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), subgraph'leri oluşturmak ve dağıtmak için bir CLI aracıdır. Cosmos subgraph'leri ile çalışabilmek için `>=0.30.0` sürümü gereklidir. -[graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) is a library of subgraph-specific types, version `>=0.27.0` is required in order to work with Cosmos subgraphs. +[graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts), subgraph'e özel türlerden oluşan bir kütüphanedir ve Cosmos subgraph'leri ile çalışabilmek için `>=0.27.0` sürümü gereklidir. ### Subgraph Ana Bileşenleri Bir subgraph'i tanımlama noktasında üç anahtar kısım vardır: -**subgraph.yaml**: hangi olayların izleneceğini ve bunların nasıl işleneceğini tanımlayan subgraph bildirimini içeren bir YAML dosyası. +**subgraph.yaml**: hangi olayıların takip edileceğini ve bunların nasıl işleneceğini tanımlayan subgraph manifestosunu içeren bir YAML dosyası. -**schema.graphql**: subgrpah'iniz için hangi verilerin depolandığını ve bunun GraphQL aracılığıyla nasıl sorgulanacağını tanımlayan bir GraphQL şeması. +**schema.graphql**: Subgraph'iniz için hangi verilerin saklanacağını ve bu verilerin GraphQL aracılığıyla nasıl sorgulanabileceğini tanımlayan bir GraphQL şemasıdır. -**AssemblyScript Eşlemeleri**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) blok zinciri verilerinden şemanızda tanımlanan varlıklara çeviren kod. 
+**AssemblyScript Eşlemeleri:** blokzinciri verilerini şemanızda tanımlanan varlıklara dönüştüren [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) kodu. ### Subgraph Manifest Tanımı -Subgraph bildirimi (`subgraph.yaml`), subgraph için veri kaynaklarını, ilgilenilen tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken işlevleri (`işleyiciler`) tanımlar. Bir Cosmos subgrpah'i için örnek bir subgraph bildirimi için alt kısma göz atın: +Subgraph manifestosu (`subgraph.yaml`), subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları (`handlers`) tanımlar. Aşağıda bir Cosmos subgraph manifestosu örneği bulunmaktadır: ```yaml specVersion: 0.0.5 @@ -74,18 +74,18 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Cosmos subgraph'leri, yeni bir `tür` veri kaynağı sunar (`cosmos`). -- `Ağ`, Cosmos ekosistemindeki bir zincire karşılık gelmelidir. Örnekte, Cosmos Hub mainnet'i kullanılmıştır. +- Cosmos subgraph'leri, yeni bir veri kaynağı `türü` (`kind`) tanıtır (`cosmos`). +- `network` (ağ), Cosmos ekosistemindeki bir zinciri belirtmelidir. Örnekte, Cosmos Hub ana ağı kullanılmıştır. ### Şema Tanımı -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Şema tanımı, ortaya çıkan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri açıklar. Bu, orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla detay [burada](/developing/creating-a-subgraph/#the-graphql-schema) bulunabilir. ### AssemblyScript Eşlemeleri -Olayları işlemek için işleyiciler [AssemblyScript](https://www.assemblyscript.org/) içinde yazılmıştır. +Olayları işlemek için kullanılan işleyiciler [AssemblyScript](https://www.assemblyscript.org/) ile yazılmıştır. -Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +Cosmos endeksleme, [AssemblyScript API'ına](/subgraphs/developing/creating/graph-ts/api/) Cosmos'a özgü veri türlerini tanıtır. ```tsx class Block { @@ -165,30 +165,30 @@ class Any { Her işleyici türü, bir eşleme işlevine bağımsız değişken olarak iletilen kendi veri yapısıyla birlikte gelir. -- Blok işleyicileri `Block` tipini alır. -- Etkinlik işleyicileri, `EventData` türünü alır. -- İşlem işleyicileri, `TransactionData` türünü alır. -- Mesaj işleyicileri, `MessageData` tipini alır. +- Blok işleyiciler `Block` türünü alır. +- Olay işleyiciler `EventData` türünü alır. +- İşlem işleyiciler `TransactionData` türünü alır. +- Mesaj işleyiciler `MessageData` türünü alır. -`MessageData`'ın bir parçası olarak, mesaj işleyici, bir mesajı kapsayan bir işlemle ilgili en önemli bilgileri içeren bir işlem bağlamı alır. İşlem bağlamı, `EventData` türünde de mevcuttur, ancak yalnızca karşılık gelen olay bir işlemle ilişkilendirildiğinde. Ek olarak, tüm işleyiciler bir bloğa başvuru alır (`HeaderOnlyBlock`). +`MessageData`'nın bir parçası olarak mesaj işleyici, bir mesajı kapsayan işlemle ilgili en önemli bilgileri içeren bir işlem bağlamı alır. İşlem bağlamı, yalnızca ilgili olay bir işlemle ilişkili olduğunda `EventData` türünde de mevcuttur. Ek olarak, tüm işleyiciler bir bloka (`HeaderOnlyBlock`) referans alır. 
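
To make the transaction context concrete, below is a minimal sketch of a message handler that reads it. The field names (`message`, `tx`, `block`) are assumed from the graph-ts Cosmos types linked just after this, so treat it as an illustration rather than a guaranteed API.

```tsx
import { cosmos } from "@graphprotocol/graph-ts";

// Minimal sketch, assuming the MessageData fields exposed by graph-ts
// (message, tx, block); check the linked type definitions for your version.
export function handleMsgDelegate(data: cosmos.MessageData): void {
  const txHash = data.tx.hash.toHexString(); // hash of the enclosing transaction
  const height = data.block.header.height; // header of the block that contains it
  // data.message.value carries the serialized protobuf payload (see decoding below)
}
```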
-Cosmos entegrasyonu türlerinin tam listesini [burada](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts) bulabilirsiniz. +Cosmos entegrasyonun tüm türlerinin listesine [buradan](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts) ulaşabilirsiniz. ### Mesaj çözme -It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. +Cosmos mesajlarının zincir-spesifik olduğunu ve bir subgraph'e [Protocol Buffers](https://protobuf.dev/) yükü olarak serileştirilmiş biçimde iletildiğini unutmamak önemlidir. Sonuç olarak, mesaj verileri işlenmeden önce bir eşleme fonksiyonunda çözülmelidir. -Bir subgraph'taki mesaj verisinin nasıl çözüleceğine dair bir örnek [burada](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts) bulunabilir. +Subgraph içinde mesaj verilerinin nasıl çözüleceğine dair bir örneğe [buradan](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts) ulaşabilirsiniz. ## Bir Cosmos subgraph'i inşa etme ve oluşturma -Alt çizge eşlemelerini yazmaya başlamadan önceki ilk adım, alt çizge şema dosyasında (`schema.graphql`) tanımlanan varlıklara dayalı tip bağlarını oluşturmaktır. Bu, eşleme işlevlerinin bu türlerde yeni nesneler oluşturmasına ve bunları depoya kaydetmesine izin verecektir. Bu, `codegen` CLI komutu kullanılarak yapılır: +Subgraph eşlemelerini yazmaya başlamadan önceki ilk adım, `schema.graphql` dosyasında tanımlanan varlıklar temelinde tür bağlamalarını oluşturmaktır. Bu, eşleme fonksiyonlarının bu türlerden yeni nesneler oluşturmasına ve bunları mağazaya kaydetmesine olanak tanır. Bu işlem, `codegen` CLI komutuyla gerçekleştirilir: ```bash $ graph codegen ``` -Eşlemeler hazır olduğunda, subgraph'in oluşturulması gerekir. Bu adım, bildirimde veya eşlemelerde olabilecek hataları vurgulayacaktır. Graph Node'una deploy edilmek için subgraph'in başarılı bir şekilde oluşturulması gerekir. `build` CLI komutu kullanılarak yapılabilir: +Eşlemeler hazır olduğunda, subgraph derlenmelidir. Bu adım, manifestoda veya eşlemelerde herhangi bir hata olup olmadığını gösterir. Bir subgraph, Graph Düğümü'ne dağıtılabilmesi için başarıyla derlenmelidir. Bu işlem, `build` CLI komutuyla gerçekleştirilebilir: ```bash $ graph build @@ -196,17 +196,17 @@ $ graph build ## Bir Cosmos subgraph'ini deploy etme -Subgraph'ınız oluşturulduktan sonra, `graph deploy` CLI komutunu kullanarak subgraph'ınızı dağıtabilirsiniz: +Subgraph oluşturulduktan sonra, `graph deploy` CLI komutunu kullanarak subgraph'inizi dağıtabilirsiniz: -**Subgraph Stüdyosu** +**Subgraph Studio** -Visit the Subgraph Studio to create a new subgraph. +Yeni bir subgraph oluşturmak için Subgraph Studio'yu ziyaret edin. ```bash graph deploy subgraph-name ``` -**Local Graph Node (based on default configuration):** +**Yerel Graph Düğümü (varsayılan yapılandırma temel alınarak):** ```bash graph create subgraph-name --node http://localhost:8020 @@ -218,7 +218,7 @@ graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost ## Bir Cosmos subgraph'ini sorgulama -The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. 
Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information.
+Cosmos subgraphleri için GraphQL uç noktası, mevcut API arayüzü ile şema tanımına göre belirlenir. Daha fazla bilgi için [GraphQL API dokümantasyonuna](/subgraphs/querying/graphql-api/) göz atabilirsiniz.

## Desteklenen Cosmos Blok Zincirleri

### Cosmos Hub

#### Cosmos Hub Nedir?

-The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information.
+[Cosmos Hub blokzinciri](https://hub.cosmos.network/), [Cosmos](https://cosmos.network/) ekosistemindeki ilk blokzinciridir. Daha fazla bilgi için [resmi dokümantasyonu](https://docs.cosmos.network/) ziyaret edebilirsiniz.

#### Ağlar

-Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them.
+Cosmos Hub ana ağı `cosmoshub-4` olarak geçer. Cosmos Hub mevcut test ağı ise `theta-testnet-001` olarak geçer.
    `cosmoshub-3` gibi diğer Cosmos Hub ağları durdurulmuş olduğundan, bu ağlara veri sağlanmamaktadır. ### Osmosis -> Osmosis support in Graph Node and on Subgraph Studio is in beta: please contact the graph team with any questions about building Osmosis subgraphs! +> Graph Düğümü ve Subgraph Studio'daki Osmosis desteği beta aşamasındadır: Osmosis subgraphleri oluşturmayla ilgili sorularınız için The Graph ekibiyle iletişime geçebilirsiniz! #### Osmosis Nedir? -[Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. +[Osmosis](https://osmosis.zone/), Cosmos SDK üzerinde inşa edilmiş merkeziyetsiz, zincirler arası bir otomatik piyasa yapıcı (AMM) protokolüdür. Kullanıcıların özel likidite havuzları oluşturmasına ve IBC-uyumlu token'ları alıp satmasına olanak tanır. Daha fazla bilgi için [resmi dokümantasyonu](https://docs.osmosis.zone/) inceleyebilirsiniz. #### Ağlar -Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. +Osmosis ana ağı `osmosis-1` olarak geçer. Osmosis mevcut test ağı ise `osmo-test-4` olarak geçer. ## Örnek Subgraph'ler -Here are some example subgraphs for reference: +Aşağıda bazı örnek subgraph'leri bulabilirsiniz: -[Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) +[Blok Filtreleme Örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) -[Validator Rewards Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) +[Validator Ödülleri Örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) -[Validator Delegations Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) +[Validator Delegasyonları Örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) -[Osmosis Token Swaps Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) +[Osmosis Token Takasları Örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) From 243e245014ddf0928d57fb799d42650ade482ddd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:53 -0500 Subject: [PATCH 0599/1534] New translations cosmos.mdx (Ukrainian) --- .../pages/uk/subgraphs/cookbook/cosmos.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/cosmos.mdx b/website/src/pages/uk/subgraphs/cookbook/cosmos.mdx index 37e42fc9638b..3e335b9d60c2 100644 --- a/website/src/pages/uk/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## Що таке Cosmos підграфи? -The Graph дозволяє розробникам проводити обробку подій блокчейну і робити отримані дані легко доступними за допомогою відкритого API GraphQL, відомого як підграф. [Graph Node](https://github.com/graphprotocol/graph-node) тепер може обробляти події на Cosmos, що означає, що розробники Cosmos тепер можуть створювати підграфи для легкого індексування подій у блокчейні. 
+The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. У підграфах на Cosmos підтримується чотири типи обробників: -- **Обробники блоків** запускаються щоразу, коли до мережі додається новий блок. -- **Обробники подій** запускаються, коли відбувається певна подія. -- **Обробники транзакцій** запускаються, коли виконується транзакція. -- **Обробники повідомлень** запускаються, коли надходить конкретне повідомлення. +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -Згідно з [офіційною документацією Cosmos](https://docs.cosmos.network/): +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Хоча до всіх даних можна отримати доступ за допомогою блок-обробника, інші обробники дозволяють розробникам підграфів обробляти дані у значно детальніший спосіб. @@ -37,15 +37,15 @@ The Graph дозволяє розробникам проводити оброб Визначення підграфа складається з трьох ключових компонентів: -**subgraph.yaml**: YAML-файл, що містить маніфест підграфів, який визначає, які події відстежувати і яким чином їх обробляти. +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**: схема GraphQL, яка визначає, які дані зберігаються для вашого підграфа і як їх запитувати через GraphQL. +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) переводить дані блокчейну в елементи, визначені у вашій схемі. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### Визначення маніфесту підграфів -Маніфест підграфа (`subgraph.yaml`) визначає джерела даних для підграфа, тригери, що нас цікавлять, та функції (`handlers`), які слід запускати у відповідь на ці тригери. Нижче наведено приклад маніфесту підграфів для підграфа на Cosmos: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. 
See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 @@ -74,8 +74,8 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Підграфи на Cosmos вводять новий `kind` джерела даних (`cosmos`). -- `Мережа блокчейну` повинна відповідати мережі в екосистемі Cosmos. У прикладі використовується основна мережа Cosmos Hub. +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### Визначення схеми @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript Mappings -Обробники для виконання подій написані на мові [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -165,30 +165,30 @@ class Any { Кожен тип обробника має власну структуру даних, яка передається як аргумент функції маппінгу. -- Обробники блоків отримують код типу `Block`. -- Обробники подій отримують код типу `EventData`. -- Обробники транзакцій отримують код типу `TransactionData`. -- Обробники повідомлень отримують код типу `MessageData`. +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. +- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -Як частина `MessageData` обробник повідомлення отримує контекст транзакції, який містить найважливішу інформацію про транзакцію, що охоплює повідомлення. Контекст транзакції також доступний у коді типу `EventData`, але лише тоді, коли відповідна подія пов'язана з транзакцією. Додатково всі обробники отримують посилання на блок (`HeaderOnlyBlock`). +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -Ви можете знайти повний список типів коду для інтеграції в Cosmos [тут](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### Розшифровка повідомлень It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -Приклад розшифровки даних повідомлення в підграфі можна знайти [тут](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). 
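
As a rough illustration of that decoding step, the sketch below assumes protobuf bindings generated ahead of time (for example with `as-proto`); the module layout and the `MsgDelegate` import path are hypothetical and will depend on your proto files.

```tsx
import { Protobuf } from "as-proto"; // assumed decoding library
// Hypothetical bindings generated from the cosmos.staking.v1beta1 proto files:
import { MsgDelegate } from "./pb/cosmos/staking/v1beta1/tx";

// Decodes the serialized `Any.value` bytes carried by a message payload.
export function decodeMsgDelegate(msg: Uint8Array): MsgDelegate {
  return Protobuf.decode<MsgDelegate>(msg, MsgDelegate.decode);
}
```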
## Створення та побудова підграфа на Cosmos -Першим кроком перед початком написання схем підграфів є генерація приналежності типів на основі елементів, які були визначені у файлі схеми підграфів (`schema.graphql`). Це дозволить функціям схем створювати нові об'єкти цих типів і зберігати їх у сховищі. Це робиться за допомогою використання CLI команди `codegen`: +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: ```bash $ graph codegen ``` -Після того, як схеми готові, потрібно побудувати підграф. На цьому кроці буде показано всі помилки, які можуть бути у маніфесті або схемах. Підграф має бути успішно побудований для того, щоб його можна було розгорнути у Graph Node. Це можна зробити, використовуючи CLI команду `build`: +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: ```bash $ graph build @@ -198,7 +198,7 @@ $ graph build Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: -**Субграф Студія** +**Subgraph Studio** Visit the Subgraph Studio to create a new subgraph. From d4547f180abc368f3b70313722453e0726877653 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:54 -0500 Subject: [PATCH 0600/1534] New translations cosmos.mdx (Chinese Simplified) --- .../pages/zh/subgraphs/cookbook/cosmos.mdx | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/cosmos.mdx b/website/src/pages/zh/subgraphs/cookbook/cosmos.mdx index 475d0cb1108a..3958b67df717 100644 --- a/website/src/pages/zh/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## Cosmos 子图是什么? -Graph 为开发人员提供了一种被称为子图的工具,利用这个工具,开发人员能够处理区块链事件,并通过 GraphQL API 提供结果数据。 [Graph 节点](https://github.com/graphprotocol/graph-node)现在能够处理 Cosmos 事件,这意味着 Cosmos 开发人员现在可以构建子图来索引链上事件。 +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. Cosmos 子图目前支持四种类型的处理程序: -- 每当一个新的区块被追加到链中时,**区块处理程序**就会运行。 -- **事件处理程序**在发出特定事件时运行。 -- **交易处理程序**在发生交易时运行。 -- **消息处理程序**在发生特定消息时运行。 +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -根据[Cosmos的正式文件](https://docs.cosmos.network/): +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. 
They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. 尽管所有数据都可以通过区块处理程序访问,但其他处理程序使子图开发人员能够以更细粒度的方式处理数据。 @@ -37,21 +37,21 @@ Cosmos 子图目前支持四种类型的处理程序: 定义子图有三个关键部分: -**subgraph.yaml**: 包含子图清单的 YAML 文件,标识需要跟踪以及如何处理哪些事件。 +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**: 一个 GraphQL 模式文件,定义了为子图存储哪些数据,以及如何通过 GraphQL 查询这些数据。 +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -**AssemblyScript 映射**: 将区块链数据转换为模式文件中定义的实体的[AssemblyScript](https://github.com/AssemblyScript/assemblyscript) 代码。 +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### 子图清单定义 -子图清单(`subgraph.yaml`)标识子图的数据源、感兴趣的触发器以及应该响应这些触发器而运行的函数(`处理程序`)。下面是 Cosmos 子图的子图清单示例: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 description: Cosmos Subgraph Example schema: - file: ./schema.graphql #指向模式文件的链接 + file: ./schema.graphql # link to the schema file dataSources: - kind: cosmos name: CosmosHub @@ -62,20 +62,20 @@ dataSources: apiVersion: 0.0.7 language: wasm/assemblyscript blockHandlers: - - handler: handleNewBlock # 映射文件中的函数名称 + - handler: handleNewBlock # the function name in the mapping file eventHandlers: - - event: rewards #将要处理的事件类型 - handler: handleReward # 映射文件中的函数名称 + - event: rewards # the type of the event that will be handled + handler: handleReward # the function name in the mapping file transactionHandlers: - - handler: handleTransaction # 映射文件中的函数名称 + - handler: handleTransaction # the function name in the mapping file messageHandlers: - - message: /cosmos.staking.v1beta1.MsgDelegate # 消息的类型 - handler: handleMsgDelegate #映射文件中的函数名称 - file: ./src/mapping.ts #指向包含Assemblyscript映射的文件的链接 + - message: /cosmos.staking.v1beta1.MsgDelegate # the type of a message + handler: handleMsgDelegate # the function name in the mapping file + file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Cosmos子图引入了一`种`新的数据源(`Cosmos`)。 -- 该`网络`应该对应于 Cosmos 生态系统中的一个链。在示例中,使用了Cosmos Hub主网。 +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### 模式定义 @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript 映射 -处理事件的处理程序是用 [AssemblyScript](https://www.assemblyscript.org/) 编写的。 +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). 
@@ -163,42 +163,42 @@ class Any { } ``` -每个处理程序类型都有自己的数据结构,这些数据结构作为参数传递给映射函数。 +Each handler type comes with its own data structure that is passed as an argument to a mapping function. -- 区块处理程序接收` Block` 类型。 -- 事件处理程序接收 `EventData` 类型。 -- 交易处理程序接收 `TransactionData` 类型。 -- 消息处理程序接收 `MessageData` 类型。 +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. +- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -作为 `MessageData` 的一部分,消息处理程序接收交易内容,其中包含有关组成消息的交易的最重要信息。交易消息在` EventData` 类型中也可用,但只有当相应的事件与交易关联时才可用。此外,所有处理程序都接收到对区块的引用(`HeaderOnlyBlock`)。 +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -您可以在[这里](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts)找到 Cosmos 集成的完整类型列表。 +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### 消息解码 It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -一个在子图中解码消息数据的示例:[](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts)。 +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## 创建和构建 Cosmos 子图 -开始编写子图映射之前的第一步是根据子图模式文件(`schema.Graphql`)中定义的实体生成类型绑定。这将允许映射函数创建这些类型的新对象并将它们保存到存储中。这是通过使用 `codegen `CLI 命令完成的: +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: ```bash $ graph codegen ``` -一旦映射就绪,就需要构建子图。此步骤将标记出清单或映射可能具有的任何错误。为了将子图部署到 Graph 节点,需要成功构建子图。可以使用`build` CLI 命令来完成: +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: ```bash $ graph build ``` -## 部署 Cosmos 子图 +## Deploying a Cosmos subgraph -创建子图后,您可以使用 `graph deploy` CLI 命令部署子图: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: -**子图工作室** +**Subgraph Studio** Visit the Subgraph Studio to create a new subgraph. @@ -216,15 +216,15 @@ graph create subgraph-name --node http://localhost:8020 graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -## 查询 Cosmos 子图 +## Querying a Cosmos subgraph The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
-## 受支持的Cosmos区块链 +## Supported Cosmos Blockchains ### Cosmos Hub -#### 什么是Cosmos Hub? +#### What is Cosmos Hub? The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information. @@ -236,7 +236,7 @@ Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testne > Osmosis support in Graph Node and on Subgraph Studio is in beta: please contact the graph team with any questions about building Osmosis subgraphs! -#### Osmosis是什么? +#### What is Osmosis? [Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. From 2c3c5fe2c4543b9728cd2db2b0d318af41dcc8a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:55 -0500 Subject: [PATCH 0601/1534] New translations cosmos.mdx (Urdu (Pakistan)) --- .../pages/ur/subgraphs/cookbook/cosmos.mdx | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/cosmos.mdx b/website/src/pages/ur/subgraphs/cookbook/cosmos.mdx index 16221134bb54..e6cdbe1a2dac 100644 --- a/website/src/pages/ur/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## کوزموس کے سب گراف کیا ہیں ؟ -گراف ڈویلپرز کو بلاکچین ایونٹس پر کارروائی کرنے اور نتیجے میں آنے والے ڈیٹا کو ای کھلے GraphQL API کے ذریعے آسانی سے دستیاب کرنے کی اجازت دیتا ہے، جسے سب گراف کہا جاتا ہے۔ [گراف نوڈ](https://github.com/graphprotocol/graph-node) اب کوزموس ایونٹس پر کارروائی کرنے کے قابل ہے، جس کا مطلب ہے کہ کوزموس ڈویلپرز اب آسانی سے آن چین ایونٹس کو انڈیکس کرنے کے لیے سب گراف بنا سکتے ہیں. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. کوزموس سب گراف میں چار قسم کے ہینڈلرز کی حمایت کی جاتی ہے: -- **بلاک ہینڈلرز** چلتے ہیں جب بھی چین میں نیا بلاک شامل کیا جاتا ہے. -- **ایونٹ ہینڈلرز** اس وقت چلتے ہیں جب کوئی مخصوص ایونٹ خارج ہوتا ہے. -- جب کوئیٹرانزیکشن ہوتا ہے تو **ٹرانزیکشن ہینڈلرز** چلتے ہیں. -- **میسج ہینڈلرز** اس وقت چلتے ہیں جب کوئی مخصوص پیغام آتا ہے. +- **Block handlers** run whenever a new block is appended to the chain. +- **Event handlers** run when a specific event is emitted. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -[سرکاری کوزموس دستاویزات](https://docs.cosmos.network/) کی بنیاد پر: +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. 
- +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. اگرچہ تمام ڈیٹا تک بلاک ہینڈلر کے ذریعے رسائی حاصل کی جا سکتی ہے، دوسرے ہینڈلرز سب گراف ڈویلپرز کو زیادہ دانے دار طریقے سے ڈیٹا پر کارروائی کرنے کے قابل بناتے ہیں. @@ -37,15 +37,15 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co جب سب گراف کی وضاحت کی بات آتی ہے تو تین اہم حصے ہوتے ہیں: -** subgraph.yaml **: ایک YAML فائل جو کے سب گراف مینی فیسٹ پر مشتمل ہے, پتا لگاتا ہے کن ایونٹس کا پتا رکھنا ہے اور کیسے ان پر عمل کرنا ہے. +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**:ایک گراف کیو ایل اسکیما ہے جو بیان کرتا ہے کےآپ کے سب گراف کے لیے کونسا ڈیٹا ذخیرہ ہے، اور اسے کیوری کیسے کرنا ہے گراف کیو ایل کا استئمال کرتے ہوۓ. +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. -** AssemblyScript Mappings **: [اسمبلی اسکرپٹ ](https://github.com/AssemblyScript/assemblyscript) کوڈ جو بلاک چین ڈیٹا کا ترجمہ ان ہستیوں میں کرتا ہے جو آپ کی اسکیما میں موجود ہوں. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### سب گراف مینی فیسٹ کی تعریف -سب گراف مینی فیسٹ (`subgraph.yaml`) سب گراف کے ڈیٹا کے ذرائع کا پتا لگاتا ہے, دلچسپی کے محرکات, اور افعال (`handlers`) جو ان محرکات کے جواب میں چلتے ہیں. کوزموس سب گراف کے لیے نیچے دی گئ سب گراف کی مثال دیکھیں: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 @@ -74,8 +74,8 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- کوزموس سب گراف ڈیٹا کے ذرائع (`cosmos`) کی ایک نئ `kind` متعارف کراتا ہے. -- `network` کو کوزموس ایکو سسٹم میں ایک سلسلہ کے مطابق ہونا چاہیے۔ مثال میں، کوزموس ہب مین نیٹ استعمال کیا جاتا ہے. +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### اسکیما کی تعریف @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### اسمبلی اسکرپٹ سب میپنک -پروسیسنگ ایونٹس کے ہینڈلرز [اسمبلی اسکرپٹ](https://www.assemblyscript.org/) میں لکھے گئے ہیں. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -165,30 +165,30 @@ class Any { ہر ہینڈلر کی قسم اس کے اپنے ڈیٹا ڈھانچے کے ساتھ آتی ہے جو میپنگ فنکشن کی دلیل کے طور پر پاس کی جاتی ہے. -- بلاک ہینڈلرز کو `Block` قسم موصول ہوتی ہے. -- ایونٹ ہینڈلرز کو `EventData` قسم موصول ہوتی ہے. -- ٹرانزیکشن ہینڈلرز کو `TransactionData` قسم موصول ہوتی ہے. -- میسج ہینڈلرز کو `MessageData` قسم موصول ہوتی ہے. +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. 
+- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -`MessageData` کے ایک حصے کے طور پر میسج ہینڈلر کو ایک ٹرانزیکشن کا سیاق و سباق ملتا ہے، جس میں کسی ٹرانزیکشن کے بارے میں سب سے اہم معلومات ہوتی ہے جس میں پیغام شامل ہوتا ہے۔ ٹرانزیکشن کا سیاق و سباق `EventData` قسم میں بھی دستیاب ہے، لیکن صرف اس صورت میں جب متعلقہ واقعہ کسی ٹرانزیکشن سے وابستہ ہو۔ مزید برآں، تمام ہینڈلرز کو بلاک (`HeaderOnlyBlock`) کا حوالہ ملتا ہے. +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -آپ کوزموس انضمام کے لیے اقسام کی مکمل فہرست [یہاں](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts) حاصل کر سکتے ہیں. +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). ### پیغام کی ضابطہ کشائی It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -سب گراف میں میسج کے ڈیٹا کو ڈی کوڈ کرنے کے طریقے کی ایک مثال [ یہاں](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts) مل سکتی ہے. +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## کوزموس سب گراف بنانا اور تعمیر کرنا -سب گراف میپنگ لکھنا شروع کرنے سے پہلے پہلا قدم ان ہستیوں کی بنیاد پر ٹائپ بائنڈنگز تیار کرنا ہے جن کی وضاحت سب گراف سکیما فائل (`schema.graphql`) میں کی گئی ہے۔ یہ میپنگ کے افعال کو ان اقسام کی نئی اشیاء بنانے اور انہیں اسٹور میں محفوظ کرنے کی اجازت دے گا۔ یہ `codegen` CLI کمانڈ استعمال کرکے کیا جاتا ہے: +The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: ```bash $ graph codegen ``` -میپنگ کے تیار ہونے کے بعد، سب گراف کو تعمیر کرنے کی ضرورت ہے۔ یہ مرحلہ مینی فیسٹ یا میپنگ میں ہونے والی کسی بھی خامی کو نمایاں کرے گا۔ گراف نوڈ پر تعینات کرنے کے لیے ایک سب گراف کو کامیابی سے بنانے کی ضرورت ہے۔ یہ `build` CLI کمانڈ کا استعمال کرتے ہوئے کیا جا سکتا ہے: +Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. 
It can be done using the `build` CLI command: ```bash $ graph build @@ -196,9 +196,9 @@ $ graph build ## کوزموس سب گراف کو تعینات کرنا -ایک دفا آپ کا سب گراف بن گیا ہے، آپ اپنا سب گراف `graph deploy` کی CLI کمانڈ کا استعمال کرتے ہوۓ تعینات کر سکتے ہیں: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: -**سب گراف سٹوڈیو** +**Subgraph Studio** Visit the Subgraph Studio to create a new subgraph. From b737af5db73cc141edf1eb58957617747ba9091f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:56 -0500 Subject: [PATCH 0602/1534] New translations cosmos.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/cookbook/cosmos.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/cosmos.mdx b/website/src/pages/vi/subgraphs/cookbook/cosmos.mdx index e58bb266b3b5..1d68d3cdf7f0 100644 --- a/website/src/pages/vi/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/cosmos.mdx @@ -6,7 +6,7 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## What are Cosmos subgraphs? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. There are four types of handlers supported in Cosmos subgraphs: @@ -18,9 +18,9 @@ There are four types of handlers supported in Cosmos subgraphs: Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. 
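
To illustrate that difference in granularity, here is a minimal block handler sketch. The `Block` entity and its `number` field are assumptions standing in for whatever your `schema.graphql` actually defines.

```tsx
import { BigInt, cosmos } from "@graphprotocol/graph-ts";
// `Block` is a hypothetical entity assumed to be defined in schema.graphql.
import { Block } from "../generated/schema";

// Runs once per block; event/transaction/message handlers fire only for the
// specific triggers they declare, which is usually cheaper and more precise.
export function handleNewBlock(block: cosmos.Block): void {
  const entity = new Block(block.header.hash.toHexString());
  entity.number = BigInt.fromString(block.header.height.toString());
  entity.save();
}
```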
From 57187bcb6095ac1787eb3d84f0b3ec1bdc39933f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:54:57 -0500 Subject: [PATCH 0603/1534] New translations cosmos.mdx (Marathi) --- .../pages/mr/subgraphs/cookbook/cosmos.mdx | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/cosmos.mdx b/website/src/pages/mr/subgraphs/cookbook/cosmos.mdx index 47ff05bda552..16b5b216fd3a 100644 --- a/website/src/pages/mr/subgraphs/cookbook/cosmos.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/cosmos.mdx @@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co ## कॉसमॉस सबग्राफ काय आहेत? -आलेख विकसकांना ब्लॉकचेन इव्हेंट्सवर प्रक्रिया करण्यास आणि परिणामी डेटा ओपन ग्राफक्यूएल API द्वारे सहज उपलब्ध करून देतो, ज्याला सबग्राफ म्हणून ओळखले जाते. [Graph Node](https://github.com/graphprotocol/graph-node) आता Cosmos इव्हेंटवर प्रक्रिया करण्यास सक्षम आहे, याचा अर्थ Cosmos डेव्हलपर आता ऑन-चेन इव्हेंट सहजपणे अनुक्रमित करण्यासाठी सबग्राफ तयार करू शकतात. +The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events. कॉसमॉस सबग्राफमध्ये चार प्रकारचे हँडलर समर्थित आहेत: -- **ब्लॉक हँडलर** जेव्हा जेव्हा साखळीला नवीन ब्लॉक जोडला जातो तेव्हा चालतात. +- **Block handlers** run whenever a new block is appended to the chain. - **Event handlers** run when a specific event is emitted. -- जेव्हा व्यवहार होतो तेव्हा **व्यवहार हाताळणारे** चालतात. -- जेव्हा एखादा विशिष्ट संदेश येतो तेव्हा **मेसेज हँडलर** चालतात. +- **Transaction handlers** run when a transaction occurs. +- **Message handlers** run when a specific message occurs. -[अधिकृत कॉसमॉस दस्तऐवजीकरण](https://docs.cosmos.network/) वर आधारित: +Based on the [official Cosmos documentation](https://docs.cosmos.network/): > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - +> > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - +> > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. जरी सर्व डेटा ब्लॉक हँडलरने ऍक्सेस केला जाऊ शकतो, इतर हँडलर्स सबग्राफ डेव्हलपरला डेटावर अधिक बारीक पद्धतीने प्रक्रिया करण्यास सक्षम करतात. @@ -37,15 +37,15 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co सबग्राफ परिभाषित करताना तीन प्रमुख भाग आहेत: -**subgraph.yaml**: सबग्राफ मॅनिफेस्ट असलेली YAML फाइल, जी कोणत्या इव्हेंटचा मागोवा घ्यायचा आणि त्यावर प्रक्रिया कशी करायची हे ओळखते. +**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. -**schema.graphql**: एक GraphQL स्कीमा जो आपल्या सबग्राफसाठी कोणता डेटा संग्रहित केला जातो आणि GraphQL द्वारे त्याची क्वेरी कशी करावी हे परिभाषित करते. +**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. 
-**AssemblyScript मॅपिंग**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) कोड जो ब्लॉकचेन डेटावरून परिभाषित घटकांमध्ये अनुवादित करतो तुमच्या स्कीमामध्ये. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. ### सबग्राफ मॅनिफेस्ट व्याख्या -सबग्राफ मॅनिफेस्ट (`subgraph.yaml`) सबग्राफसाठी डेटा स्रोत, स्वारस्य ट्रिगर आणि फंक्शन्स (`हँडलर`) ओळखतो जे त्या ट्रिगर्सना प्रतिसाद म्हणून चालवले जावेत. कॉसमॉस सबग्राफसाठी उदाहरण सबग्राफ मॅनिफेस्टसाठी खाली पहा: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: ```yaml specVersion: 0.0.5 @@ -74,8 +74,8 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- कॉसमॉस सबग्राफ डेटा स्त्रोताचा एक नवीन `प्रकार` सादर करतात (`कॉसमॉस`). -- `नेटवर्क` कॉसमॉस इकोसिस्टममधील साखळीशी संबंधित असले पाहिजे. उदाहरणामध्ये, कॉसमॉस हब मेननेट वापरला जातो. +- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). +- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. ### स्कीमा व्याख्या @@ -83,7 +83,7 @@ Schema definition describes the structure of the resulting subgraph database and ### असेंबलीस्क्रिप्ट मॅपिंग -इव्हेंटवर प्रक्रिया करण्यासाठी हँडलर [AssemblyScript](https://www.assemblyscript.org/) मध्ये लिहिलेले आहेत. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -165,30 +165,30 @@ class Any { प्रत्येक हँडलर प्रकार त्याच्या स्वतःच्या डेटा स्ट्रक्चरसह येतो जो मॅपिंग फंक्शनला युक्तिवाद म्हणून पास केला जातो. -- ब्लॉक हँडलर्सना `ब्लॉक` प्रकार प्राप्त होतो. -- इव्हेंट हँडलर्सना `EventData` प्रकार प्राप्त होतो. -- व्यवहार हाताळणार्‍यांना `TransactionData` प्रकार प्राप्त होतो. -- मेसेज हँडलर्सना `MessageData` प्रकार प्राप्त होतो. +- Block handlers receive the `Block` type. +- Event handlers receive the `EventData` type. +- Transaction handlers receive the `TransactionData` type. +- Message handlers receive the `MessageData` type. -`MessageData` चा एक भाग म्हणून मेसेज हँडलरला एक व्यवहार संदर्भ प्राप्त होतो, ज्यामध्ये मेसेज समाविष्ट असलेल्या व्यवहाराविषयी सर्वात महत्वाची माहिती असते. व्यवहार संदर्भ `इव्हेंट डेटा` प्रकारात देखील उपलब्ध आहे, परंतु जेव्हा संबंधित इव्हेंट व्यवहाराशी संबंधित असेल तेव्हाच. याव्यतिरिक्त, सर्व हँडलर्सना ब्लॉकचा संदर्भ प्राप्त होतो (`HeaderOnlyBlock`). +As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). -तुम्हाला कॉसमॉस इंटिग्रेशनच्या प्रकारांची संपूर्ण सूची [येथे](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts) मिळेल. +You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). 
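
As a small, hedged example of these argument types, an event handler might look like the sketch below; the `Reward` entity and the `getAttributeValue` helper are assumptions based on the graph-ts Cosmos types, not a definitive API.

```tsx
import { cosmos } from "@graphprotocol/graph-ts";
// `Reward` is a hypothetical entity assumed to be defined in schema.graphql.
import { Reward } from "../generated/schema";

export function handleReward(data: cosmos.EventData): void {
  // Both the event itself and the enclosing block are available on EventData.
  const amount = data.event.getAttributeValue("amount"); // assumed helper
  const height = data.block.header.height;

  const reward = new Reward(height.toString() + "-" + amount);
  reward.amount = amount;
  reward.save();
}
```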
 
 ### संदेश डीकोडिंग
 
 It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed.
 
-सबग्राफमध्ये संदेश डेटा कसा डीकोड करायचा याचे उदाहरण [येथे](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts) आढळू शकते.
+An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts).
 
 ## कॉसमॉस सबग्राफ तयार करणे आणि तयार करणे
 
-सबग्राफ मॅपिंग लिहिण्यास सुरुवात करण्यापूर्वी पहिली पायरी म्हणजे सबग्राफ स्किमा फाईल (`schema.graphql`) मध्ये परिभाषित केलेल्या घटकांवर आधारित प्रकार बाइंडिंग तयार करणे. हे मॅपिंग फंक्शन्सना त्या प्रकारच्या नवीन वस्तू तयार करण्यास आणि त्यांना स्टोअरमध्ये जतन करण्यास अनुमती देईल. हे `codegen` CLI कमांड वापरून केले जाते:
+The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command:
 
 ```bash
 $ आलेख कोडजेन
 ```
 
-मॅपिंग तयार झाल्यावर, सबग्राफ तयार करणे आवश्यक आहे. ही पायरी मॅनिफेस्ट किंवा मॅपिंगमधील त्रुटी हायलाइट करेल. ग्राफ नोडवर तैनात करण्यासाठी सबग्राफ यशस्वीरित्या तयार करणे आवश्यक आहे. हे `build` CLI कमांड वापरून केले जाऊ शकते:
+Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command:
 
 ```bash
 $ आलेख बिल्ड
@@ -196,7 +196,7 @@ $ आलेख बिल्ड
 
 ## कॉसमॉस सबग्राफ तैनात करणे
 
-एकदा तुमचा सबग्राफ तयार झाला की, तुम्ही `graph deploy` CLI कमांड वापरून तुमचा सबग्राफ उपयोजित करू शकता:
+Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command:
 
 **Subgraph Studio**
 

From e1a64b72d55922f855dd912cb2b633fd29d20391 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:54:58 -0500
Subject: [PATCH 0604/1534] New translations cosmos.mdx (Hindi)

---
 .../pages/hi/subgraphs/cookbook/cosmos.mdx | 50 +++++++++----------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/website/src/pages/hi/subgraphs/cookbook/cosmos.mdx b/website/src/pages/hi/subgraphs/cookbook/cosmos.mdx
index d62a6ce89a05..ed1e762363fc 100644
--- a/website/src/pages/hi/subgraphs/cookbook/cosmos.mdx
+++ b/website/src/pages/hi/subgraphs/cookbook/cosmos.mdx
@@ -6,21 +6,21 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co
 
 ## कॉसमॉस सब-ग्राफ्स क्या होते हैं?
 
-द ग्राफ की मदद से डेवेलपर्स ब्लॉकचेन की गतिविधियों को प्रोसेस कर सकते हैं और उससे आने वाले डाटा को आसानी से GraphQL एपीआई की मदद से उपलब्ध करवा सकते हैं जिन्हे, सब-ग्राफ कहा जाता है| [ग्राफ नोड](https://github.com/graphprotocol/graph-node) अब कॉसमॉस की गतिविधियों को प्रोसेस करने में सक्षम है जिसका मतलब यह है की अब डेवेलपर्स चेन पर होने वाली गतिविधियों पर नज़र रखने के लिए आसानी से सब-ग्राफ्स बना सकते हैं|
+The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index onchain events.
 
 कॉसमॉस सब-ग्राफ्स में कुल चार प्रकार के हैंडलर्स सहयोगी हैं:
 
-- **ब्लॉक हैंडलर्स** तब चलते हैं जब कोई नया ब्लॉक चेन में जुड़ता है|
-- **इवेंट हैंडलर्स** तब चलते हैं जब कोई विशिष्ट गतिविधि उत्पन्न हो|
-- **ट्रांसक्शन हैंडलर्स** तब चलते हैं जब कोई ट्रांसक्शन होता है|
-- **मैसेज हैंडलर्स** तब चलते हैं जब कोई विशिष्ट सन्देश आये|
+- **Block handlers** run whenever a new block is appended to the chain.
+- **Event handlers** run when a specific event is emitted.
+- **Transaction handlers** run when a transaction occurs.
+- **Message handlers** run when a specific message occurs.
 
-[आधिकारिक कॉसमॉस डॉक्यूमेंटेशन](https://docs.cosmos.network/) के अनुसार:
+Based on the [official Cosmos documentation](https://docs.cosmos.network/):
 
 > [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions.
-
+>
 > [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application.
-
+>
 > [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to.
 
 जबकि सारा डाटा एक ब्लॉक हैंडलर की मदद से प्राप्त किया जा सकता है, बाकी हैंडलर्स सब-ग्राफ डेवलपरों को डाटा अधिक बारीक तरह से प्रोसेस करने में सहायता करते हैं|
@@ -37,15 +37,15 @@ This guide is an introduction on building subgraphs indexing [Cosmos](https://co
 
 सब-ग्राफ्स को परिभासित करने के तीन मुख्या अंग हैं:
 
-**subgraph.yaml**: एक YAML फाइल जिसमे सब-ग्राफ मैनिफेस्ट फाइल होती है, जो कि इवेंट्स की पहचान करती है और उन पर नज़र रखती है|
+**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them.
 
-**schema.graphql**: एक ग्राफक्यूएल स्कीमा जो कौन सा डाटा आपके सुब-ग्राफ ेमिन स्टोर करना है इसको प्रभासित करती है और बताती है कि उसे ग्राफक्यूएल के द्वारा कैसे क्वेरी करना है|
+**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL.
 
-**असेंबली स्क्रिप्ट मैप्पिंग्स**: [असेंबली स्क्रिप्ट](https://github.com/AssemblyScript/assemblyscript) कोड जो कि ब्लॉकचैन के डाटा को आपकी स्कीमा के अनुसार अनुवादित करता है|
+**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema.
 
 ### सब ग्राफ मैनिफेस्ट की परिभाषा
 
-सब-ग्राफ मैनिफेस्ट (`subgraph.yaml`) सब-ग्राफ के डाटा सोर्स, ट्रिगर ऑफ़ इंटरेस्ट, और फंक्शन्स (`handlers`) की पहचान करता है जो कि उन ट्रिगर के जवाब में चलाये जाने चाहिए| कॉसमॉस सब-ग्राफ के उदहारण मैनिफेस्ट के लिए नीचे देखें:
+The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph:
 
 ```yaml
 specVersion: 0.0.5
@@ -74,8 +74,8 @@ dataSources:
       file: ./src/mapping.ts # link to the file with the Assemblyscript mappings
 ```
 
-- कॉसमॉस सब-ग्राफ्स एक नए `प्रकार` का डाटा सोर्स ले कर आते हैं (`कॉसमॉस`)
-- `नेटवर्क` कॉसमॉस इकोसिस्टम के किसी चैन के अनुरूप होना चाहिए| उदाहरण में कॉसमॉस हब मैन नेट का उपयोग किया गया था|
+- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`).
+- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used.
 
 ### स्कीमा की परिभाषा
 
@@ -83,9 +83,9 @@ Schema definition describes the structure of the resulting subgraph database and
 
 ### असेंबली स्क्रिप्ट मैप्पिंग्स
 
-इवेंट्स को प्रोसेस करने के हैंडलर्स [असेंबली स्क्रिप्ट ](https://www.assemblyscript.org/) में लिखे गए हैं|
+The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/).
 
-कॉसमॉस इंडेक्सिंग कॉसमॉस-विशिष्ट डाटा प्रकारो को [असेंबली स्क्रिप्ट ए पी आई](/subgraphs/developing/creating/graph-ts/api/) में ले कर आती है|
+Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/).
 
 ```tsx
 class Block {
@@ -165,14 +165,14 @@ class Any {
 
 हर हैंडलर प्रकार अपने खुद के डाटा स्ट्रक्चर के साथ आता है जिसे आर्गुमेंट की तरह मैपिंग फंक्शन में पास किया जा सकता है|
 
-- ब्लॉक हैंडलर्स को `ब्लॉक` प्रकार मिलता है|
-- इवेंट हैंडलर्स को `EventData` प्रकार मिलता है|
-- ट्रांसक्शन हैंडलर्स `TransactionData` प्रकार प्राप्त करते हैं |
-- मैसेज हैंडलर्स `MessageData` प्रकार प्राप्त करते हैं|
+- Block handlers receive the `Block` type.
+- Event handlers receive the `EventData` type.
+- Transaction handlers receive the `TransactionData` type.
+- Message handlers receive the `MessageData` type.
 
-`MessageData` के हिस्से के तौर पर मैसेज हैंडलर एक ट्रांसक्शन का प्रसंग प्राप्त करता है जिसमे मैसेज के अंतर्गत कई महत्वपूर्ण जानकारियाँ रहती हैं| यह ट्रांसक्शन का प्रसंग `EventData` प्रकार में भी मौजूद रहता है लेकिन तब हीं जब तदनुसार गतिविधि ट्रांसक्शन से सम्बंधित हो| इसके अतिरिक्त सभी हैंडलर्स ब्लॉक का एक सन्दर्भ प्राप्त करते हैं (`HeaderOnlyBlock`)|
+As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`).
 
-आप सभी प्रकार से कॉसमॉस से एकीकरण करने की सभी जानकारियां [यहाँ](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts) प्राप्त कर सकते है|
+You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts).
 
 ### मैसेज डिकोडिंग
 
@@ -182,13 +182,13 @@ An example of how to decode message data in a subgraph can be found [here](https
 
 ## कॉसमॉस सब-ग्राफ्स बनाना और निर्माण करना
 
-सब-ग्राफ मैपिंग लिखने से पहले पहला कदम सब-ग्राफ स्कीमा (`schema.graphql`) में परिभाषित इकाइयों के अनुसार टाइप बाईनडिंग्स बनाना होता है| इसकी वजह से मैपिंग फंक्शन नए ओब्जेक्टब बना कर उन्हें सेव कर सकेंगे| यह करने के लिए `codegen` का इस्तेमाल किया जाता है|
+The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command:
 
 ```bash
 $ graph codegen
 ```
 
-एक बार मप्पिंग्स तैयार हो जाएं, फिर सब-ग्राफ को बनाना होगा| यह कदम मैनिफेस्ट या मैपिंग की त्रुटियों को उभार कर दिखायेगा| ग्राफ नोड पर डेप्लॉय करने के लिए एक सुब-ग्राफ को सफलतापूर्वक बनाना आवश्यक है| यह करने के लिए `build` कमांड का इस्तेमाल किया जा सकता है|
+Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command:
 
 ```bash
 $ graph build
@@ -196,7 +196,7 @@ $ graph build
 
 ## एक कॉसमॉस सब-ग्राफ डेप्लॉय करना
 
-एक बार आपका सबग्राफ बन जाने के बाद, आप `graph deploy` CLI कमांड का उपयोग करके अपना सबग्राफ डिप्लॉय कर सकते हैं:
+Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command:
 
 **Subgraph Studio**
 

From 58618928a0ba7111ccf5c31c01eee3b1ad014deb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:54:59 -0500
Subject: [PATCH 0605/1534] New translations derivedfrom.mdx (Romanian)

---
 website/src/pages/ro/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/ro/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ro/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/ro/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/ro/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From e7b34d513dc337dd779a80fdeecef8db98598d52 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:00 -0500
Subject: [PATCH 0606/1534] New translations derivedfrom.mdx (French)

---
 .../fr/subgraphs/cookbook/derivedfrom.mdx | 31 ++++++++++---------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/website/src/pages/fr/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/fr/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..24c6c716f840 100644
--- a/website/src/pages/fr/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/fr/subgraphs/cookbook/derivedfrom.mdx
@@ -1,28 +1,29 @@
 ---
-title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+title: Bonne pratique pour les subgraphs 2 - Améliorer la Réactivité de l'Indexation et des Requêtes en Utilisant @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR
 
-Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly.
+Les tableaux dans votre schéma peuvent vraiment ralentir les performances d'un subgraph lorsqu'ils dépassent des milliers d'entrées. Si possible, la directive `@derivedFrom` devrait être utilisée lors de l'utilisation des tableaux car elle empêche la formation de grands tableaux, simplifie les gestionnaires et réduit la taille des entités individuelles, améliorant considérablement la vitesse d'indexation et la performance des requêtes.
 
-## How to Use the `@derivedFrom` Directive
+## Comment Utiliser la Directive `@derivedFrom`
 
-You just need to add a `@derivedFrom` directive after your array in your schema. Like this:
+Il vous suffit d'ajouter une directive `@derivedFrom` après votre tableau dans votre schéma. Comme ceci :
 
 ```graphql
 comments: [Comment!]! @derivedFrom(field: "post")
 ```
 
-`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient.
+`@derivedFrom` crée des relations efficaces de un à plusieurs, permettant à une entité de s'associer dynamiquement à plusieurs entités liées en fonction d'un champ dans l'entité liée. Cette approche élimine la nécessité pour les deux côtés de la relation de stocker des données dupliquées, rendant le subgraph plus efficace.
 
-### Example Use Case for `@derivedFrom`
+### Exemple de cas d'utilisation de `@derivedFrom`
 
-An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”.
+Un exemple de tableau à croissance dynamique est une plateforme de blogs où un "Post" peut avoir de nombreux "Comments"(Commentaires).
 
-Let’s start with our two entities, `Post` and `Comment`
+Commençons avec nos deux entités, `Post` et `Comment`
 
-Without optimization, you could implement it like this with an array:
+Sans optimisation, vous pourriez implémenter cela avec un tableau :
 
 ```graphql
 type Post @entity {
@@ -38,9 +39,9 @@ type Comment @entity {
 }
 ```
 
-Arrays like these will effectively store extra Comments data on the Post side of the relationship.
+Les tableaux comme ceux-ci stockeront effectivement des données supplémentaires de Comments du côté Post de la relation.
 
-Here’s what an optimized version looks like using `@derivedFrom`:
+Voici à quoi ressemble une version optimisée utilisant `@derivedFrom`:
 
 ```graphql
 type Post @entity {
@@ -57,13 +58,13 @@ type Comment @entity {
 }
 ```
 
-Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded.
+En ajoutant simplement la directive `@derivedFrom`, ce schéma ne stockera les "Comments" que du côté "Comments" de la relation et non du côté "Post" de la relation. Les tableaux sont stockés sur des lignes individuelles, ce qui leur permet de s'étendre de manière significative. Cela peut entraîner des tailles particulièrement grandes si leur croissance est illimitée.
 
-This will not only make our subgraph more efficient, but it will also unlock three features:
+Cela rendra non seulement notre subgraph plus efficace, mais débloquera également trois fonctionnalités :
 
-1. We can query the `Post` and see all of its comments.
+1. Nous pouvons interroger le `Post` et voir tous ses commentaires.
 
-2. We can do a reverse lookup and query any `Comment` and see which post it comes from.
+2. Nous pouvons faire une recherche inverse et interroger n'importe quel Commentaire et voir de quel post il provient.
 
 3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings.

From c31c7f65eb071170ad5dcc04df0d27101d662ddb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:01 -0500
Subject: [PATCH 0607/1534] New translations derivedfrom.mdx (Spanish)

---
 website/src/pages/es/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/es/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/es/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/es/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/es/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From 4dfabe1e9cbefe8be027bb72edeab63f9344aed3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:02 -0500
Subject: [PATCH 0608/1534] New translations derivedfrom.mdx (Arabic)

---
 website/src/pages/ar/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/ar/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ar/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/ar/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/ar/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From 96df3adc49d31c41ed0e74aa596d732db5d73d23 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:03 -0500
Subject: [PATCH 0609/1534] New translations derivedfrom.mdx (Czech)

---
 website/src/pages/cs/subgraphs/cookbook/derivedfrom.mdx | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/website/src/pages/cs/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/cs/subgraphs/cookbook/derivedfrom.mdx
index 1e78f786b696..b0446f066ba8 100644
--- a/website/src/pages/cs/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/cs/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Podgraf Doporučený postup 2 - Zlepšení indexování a rychlosti dotazů pomocí @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR
@@ -65,7 +66,7 @@ Tím se nejen zefektivní náš podgraf, ale také se odemknou tři funkce:
 
 2. Můžeme provést zpětné vyhledávání a dotazovat se na jakýkoli `Komentář` a zjistit, ze kterého příspěvku pochází.
 
-3. Pomocí [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) můžeme odemknout možnost přímého přístupu a manipulace s daty z virtuálních vztahů v našich mapováních podgrafů.
+3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings.
 
 ## Závěr
 

From 56f7db056a8216717269e8305cae9b2d310c45fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:04 -0500
Subject: [PATCH 0610/1534] New translations derivedfrom.mdx (German)

---
 website/src/pages/de/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/de/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/de/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/de/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/de/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From 8f6fbf70cf284dd821c3d54c97e817d414b61856 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:05 -0500
Subject: [PATCH 0611/1534] New translations derivedfrom.mdx (Italian)

---
 website/src/pages/it/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/it/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/it/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/it/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/it/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From 39c5e05e735452740b8f657809a388fd2a6ac6b1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:06 -0500
Subject: [PATCH 0612/1534] New translations derivedfrom.mdx (Japanese)

---
 website/src/pages/ja/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/ja/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ja/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/ja/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/ja/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From 50f31aadb59b5210beb76f41f119998ebe1d2882 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:07 -0500
Subject: [PATCH 0613/1534] New translations derivedfrom.mdx (Korean)

---
 website/src/pages/ko/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/ko/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ko/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/ko/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/ko/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From aa2c8e01dfbaf2bf2fc0c741539ab6ce4bb8859f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:08 -0500
Subject: [PATCH 0614/1534] New translations derivedfrom.mdx (Dutch)

---
 website/src/pages/nl/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/nl/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/nl/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/nl/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/nl/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From 433183ea109ce968470ba401a1e84981d4de2ae8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:09 -0500
Subject: [PATCH 0615/1534] New translations derivedfrom.mdx (Polish)

---
 website/src/pages/pl/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/pl/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/pl/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/pl/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/pl/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From 9d43256d4650577ade4e8b697bf670fbb0fabe10 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:10 -0500
Subject: [PATCH 0616/1534] New translations derivedfrom.mdx (Portuguese)

---
 .../pt/subgraphs/cookbook/derivedfrom.mdx | 23 ++++++++++---------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/website/src/pages/pt/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/pt/subgraphs/cookbook/derivedfrom.mdx
index 21332819995b..49625d832da4 100644
--- a/website/src/pages/pt/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/pt/subgraphs/cookbook/derivedfrom.mdx
@@ -1,8 +1,9 @@
 ---
 title: Boas Práticas de Subgraph 2 - Melhorar a Indexação e a Capacidade de Resposta de Queries com @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
-## Resumo
+## TLDR
 
 O desempenho de um subgraph pode ser muito atrasado por arranjos no seu schema, já que esses podem crescer além dos milhares de entradas. Se possível, a diretiva `@derivedFrom` deve ser usada ao usar arranjos, já que ela impede a formação de grandes arranjos, simplifica handlers e reduz o tamanho de entidades individuais, o que melhora muito a velocidade da indexação e o desempenho dos queries.
 
@@ -65,24 +66,24 @@ Isto não só aumenta a eficiência do nosso subgraph, mas também desbloqueia t
 
 2. Podemos fazer uma pesquisa reversa e um query sobre qualquer `Comment`, para ver de qual post ele vem.
 
-3. Podemos usar [Carregadores de Campos Derivados](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) para desbloquear a habilidade de acessar e manipular diretamente dados de relacionamentos virtuais nos nossos mapeamentos de subgraph.
+3. Podemos usar [Carregadores de Campos Derivados](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) para ativar o acesso e manipulação de dados diretamente de relacionamentos virtuais nos nossos mapeamentos de subgraph.
 
 ## Conclusão
 
-Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval.
+Usar a diretiva `@derivedFrom` nos subgraphs lida eficientemente com arranjos que crescem dinamicamente, o que melhora o desempenho da indexação e o retiro de dados.
 
-For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/).
+Para aprender mais estratégias detalhadas sobre evitar arranjos grandes, leia este blog por Kevin Jones: [Melhores Práticas no Desenvolvimento de Subgraphs: Como Evitar Grandes Arranjos](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/).
 
-## Subgraph Best Practices 1-6
+## Melhores Práticas para um Subgraph 1 – 6
 
-1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+1. [Pruning: Reduza o Excesso de Dados do Seu Subgraph para Acelerar Queries](/subgraphs/cookbook/pruning/)
 
-2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+2. [Use o @derivedFrom para Melhorar a Resposta da Indexação e de Queries](/subgraphs/cookbook/derivedfrom/)
 
-3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+3. [Melhore o Desempenho da Indexação e de Queries com o Uso de Bytes como IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
 
-4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
+4. [Evite `eth-calls` para Acelerar a Indexação](/subgraphs/cookbook/avoid-eth-calls/)
 
-5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/)
+5. [Simplifique e Otimize com Séries Temporais e Agregações](/subgraphs/cookbook/timeseries/)
 
-6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/)
+6. [Lance Hotfixes Mais Rápido com Enxertos](/subgraphs/cookbook/grafting-hotfix/)

From cec2c5167bba549f388bc67d528e752ebfa6a7b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:11 -0500
Subject: [PATCH 0617/1534] New translations derivedfrom.mdx (Russian)

---
 .../ru/subgraphs/cookbook/derivedfrom.mdx | 55 ++++++++++---------
 1 file changed, 28 insertions(+), 27 deletions(-)

diff --git a/website/src/pages/ru/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ru/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..0e7251d0645f 100644
--- a/website/src/pages/ru/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/ru/subgraphs/cookbook/derivedfrom.mdx
@@ -1,28 +1,29 @@
 ---
-title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+title: Лучшая практика для субграфов 2 — улучшение индексирования и отклика на запросы с помощью @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
-## TLDR
+## Краткое содержание
 
-Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly.
+Массивы в Вашей схеме могут значительно замедлить работу субграфа, когда их размер превышает тысячи элементов. Если возможно, следует использовать директиву @derivedFrom при работе с массивами, так как она предотвращает образование больших массивов, упрощает обработчики и уменьшает размер отдельных элементов, что значительно улучшает скорость индексирования и производительность запросов.
 
-## How to Use the `@derivedFrom` Directive
+## Как использовать директиву @derivedFrom
 
-You just need to add a `@derivedFrom` directive after your array in your schema. Like this:
+Вам нужно просто добавить директиву @derivedFrom после массива в своей схеме. Например:
 
 ```graphql
 comments: [Comment!]! @derivedFrom(field: "post")
 ```
 
-`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient.
+@derivedFrom создает эффективные отношения "один ко многим", позволяя объекту динамически ассоциироваться с несколькими связанными объектами на основе поля в связанном объекте. Этот подход исключает необходимость хранения продублированных данных с обеих сторон отношений, что делает субграф более эффективным.
 
-### Example Use Case for `@derivedFrom`
+### Пример использования @derivedFrom
 
-An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”.
+Пример динамически растущего массива — это платформа для блогов, где у "Поста" может быть много "Комментариев".
 
-Let’s start with our two entities, `Post` and `Comment`
+Начнем с наших двух объектов: `Post` и `Comment`
 
-Without optimization, you could implement it like this with an array:
+Без оптимизации Вы могли бы реализовать это следующим образом, используя массив:
 
 ```graphql
 type Post @entity {
@@ -38,9 +39,9 @@ type Comment @entity {
 }
 ```
 
-Arrays like these will effectively store extra Comments data on the Post side of the relationship.
+Подобные массивы будут эффективно хранить дополнительные данные о Comments на стороне отношения Post.
 
-Here’s what an optimized version looks like using `@derivedFrom`:
+Вот как будет выглядеть оптимизированная версия с использованием @derivedFrom:
 
 ```graphql
 type Post @entity {
@@ -57,32 +58,32 @@ type Comment @entity {
 }
 ```
 
-Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded.
+Именно при добавлении директивы `@derivedFrom`, эта схема будет хранить "Comments" только на стороне отношения "Comments", а не на стороне отношения "Post". Массивы хранятся в отдельных строках, что позволяет им значительно расширяться. Это может привести к очень большим объёмам, поскольку их рост не ограничен.
 
-This will not only make our subgraph more efficient, but it will also unlock three features:
+Это не только сделает наш субграф более эффективным, но и откроет три возможности:
 
-1. We can query the `Post` and see all of its comments.
+1. Мы можем запрашивать `Post` и видеть все его комментарии.
 
-2. We can do a reverse lookup and query any `Comment` and see which post it comes from.
+2. Мы можем выполнить обратный поиск и запросить любой `Comment`, чтобы увидеть, от какого поста он пришел.
 
-3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings.
+3. Мы можем использовать [Загрузчики производных полей](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities), чтобы получить возможность напрямую обращаться и манипулировать данными из виртуальных отношений в наших мэппингах субграфа.
 
-## Conclusion
+## Заключение
 
-Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval.
+Используйте директиву `@derivedFrom` в субграфах для эффективного управления динамически растущими массивами, улучшая эффективность индексирования и извлечения данных.
 
-For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/).
+Для более подробного объяснения стратегий, которые помогут избежать использования больших массивов, ознакомьтесь с блогом Кевина Джонса: [Лучшие практики разработки субграфов: как избежать больших массивов](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/).
 
-## Subgraph Best Practices 1-6
+## Лучшие практики для субграфов 1-6
 
-1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+1. [Увеличение скорости запросов с помощью обрезки субграфов](/subgraphs/cookbook/pruning/)
 
-2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+2. [Улучшение индексирования и отклика запросов с использованием @derivedFrom](/subgraphs/cookbook/derivedfrom/)
 
-3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+3. [Улучшение индексирования и производительности запросов с использованием неизменяемых объектов и байтов в качестве идентификаторов](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
 
-4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
+4. [Увеличение скорости индексирования путем избегания `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
 
-5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/)
+5. [Упрощение и оптимизация с помощью временных рядов и агрегаций](/subgraphs/cookbook/timeseries/)
 
-6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/)
+6. [Использование переноса (графтинга) для быстрого развертывания исправлений](/subgraphs/cookbook/grafting-hotfix/)

From 5b82dfad4aa6d75b6751bdafeb061ebdb4b1f235 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:12 -0500
Subject: [PATCH 0618/1534] New translations derivedfrom.mdx (Swedish)

---
 website/src/pages/sv/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/sv/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/sv/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/sv/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/sv/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From bc66f29767cedc97cf7fcf454dd7a8eb5581924b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:13 -0500
Subject: [PATCH 0619/1534] New translations derivedfrom.mdx (Turkish)

---
 .../tr/subgraphs/cookbook/derivedfrom.mdx | 55 ++++++++++---------
 1 file changed, 28 insertions(+), 27 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/tr/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..76af9768effd 100644
--- a/website/src/pages/tr/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/tr/subgraphs/cookbook/derivedfrom.mdx
@@ -1,28 +1,29 @@
 ---
-title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+title: Subgraph Örnek Uygulamalar 2 - @derivedFrom Kullanarak Endeksleme ve Sorgu Performansını İyileştirin
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
-## TLDR
+## Özet
 
-Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly.
+Şemanızdaki diziler, binlerce girişin ötesine geçtiğinde subgraph performansını ciddi şekilde yavaşlatabilir. Mümkünse `@derivedFrom` yönergesi kullanılmalıdır. Bu yaklaşım; büyük dizilerin oluşmasını önler, işleyicileri basitleştirir ve bireysel varlıkların boyutunu küçülterek endeksleme hızını ve sorgu performansını önemli ölçüde artırır.
 
-## How to Use the `@derivedFrom` Directive
+## `@derivedFrom` Yönergesi Nasıl Kullanılır
 
-You just need to add a `@derivedFrom` directive after your array in your schema. Like this:
+Şemanızdaki dizinin ardına @derivedFrom yönergesini eklemeniz yeterlidir. Örnek:
 
 ```graphql
 comments: [Comment!]! @derivedFrom(field: "post")
 ```
 
-`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient.
+`@derivedFrom`, verimli bir şekilde birden çoka ilişkiler oluşturur. Böylece bir varlığın, ilgili alan temelinde birden fazla ilişkili varlıklarla dinamik olarak ilişkilendirilmesini sağlar. Bu yaklaşım, ilişkinin her iki tarafının da yinelenen verileri saklama gerekliliğini ortadan kaldırarak subgraph'i daha verimli hale getirir.
 
-### Example Use Case for `@derivedFrom`
+### `@derivedFrom`'ın Örnek Kullanımı
 
-An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”.
+Dinamik olarak büyüyen dizilere örnek olarak, bir "Post"un (gönderinin) birçok "Comment" (yorum) içerebileceği bir blog platformu verilebilir.
 
-Let’s start with our two entities, `Post` and `Comment`
+`Post` ve `Comment` olmak üzere iki varlıkla başlayalım
 
-Without optimization, you could implement it like this with an array:
+Optimizasyon olmadan, bunu bir dizi ile şu şekilde uygulayabilirsiniz:
 
 ```graphql
 type Post @entity {
@@ -38,9 +39,9 @@ type Comment @entity {
 }
 ```
 
-Arrays like these will effectively store extra Comments data on the Post side of the relationship.
+Bu tür diziler, fiilen, ilişkinin "Post" tarafında fazladan "Comments" verisi depolar.
 
-Here’s what an optimized version looks like using `@derivedFrom`:
+`@derivedFrom` kullanarak optimize edilmiş bir sürüm şu şekilde görünür:
 
 ```graphql
 type Post @entity {
@@ -57,32 +58,32 @@ type Comment @entity {
 }
 ```
 
-Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded.
+Sadece `@derivedFrom` yönergesini ekleyerek bu şema, ilişkinin “Post” tarafında değil, yalnızca “Comments” tarafında “Comments” verilerini depolamış olur. Diziler bireysel satırlara yayıldığı için önemli ölçüde genişleyebilir. Bu durum, büyüme sınırsız olduğunda büyük boyutlara yol açabilir.
 
-This will not only make our subgraph more efficient, but it will also unlock three features:
+Bu yalnızca subgraph'imizi daha verimli hale getirmekle kalmayacak, aynı zamanda şu üç özelliği de kullanmamıza olanak tanıyacaktır:
 
-1. We can query the `Post` and see all of its comments.
+1. `Post`'u sorgulayarak tüm yorumlarını görebiliriz.
 
-2. We can do a reverse lookup and query any `Comment` and see which post it comes from.
+2. Herhangi bir `Comment`'te tersine arama yapabilir ve hangi gönderiden geldiğini sorgulayabiliriz.
 
-3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings.
+3. [Türetilmiş Alan Yükleyicileri](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) kullanarak, sanal ilişkilerden gelen verilere doğrudan erişim ve manipülasyon yapma yeteneğini subgraph eşlemelerinde etkinleştirebiliriz.
 
-## Conclusion
+## Sonuç
 
-Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval.
+`@derivedFrom` direktifini subgraph'lerde dinamik olarak büyüyen dizileri etkili bir şekilde yönetmek için kullanın. Bu direktif endeksleme verimliliğini ve veri alımını artırır.
 
-For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/).
+Büyük dizilerden kaçınma stratejilerinin daha ayrıntılı bir açıklaması için Kevin Jones'un blog yazısına göz atın: [Subgraph Geliştiriminde Örnek Uygulamalar: Büyük Dize Kümelerinden Kaçınmak](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/).
 
-## Subgraph Best Practices 1-6
+## Subgraph Örnek Uygulamalar 1-6
 
-1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+1. [Subgraph Budama ile Sorgu Hızını İyileştirin](/subgraphs/cookbook/pruning/)
 
-2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+2. [@derivedFrom Kullanarak Endeksleme ve Sorgu Yanıt Hızını Artırın](/subgraphs/cookbook/derivedfrom/)
 
-3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+3. [Değişmez Varlıklar ve Bytes ID'ler Kullanarak Endeksleme ve Sorgu Performansını Artırın](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
 
-4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
+4. [Endeksleme Hızını `eth_calls`'den Kaçınarak İyileştirin](/subgraphs/cookbook/avoid-eth-calls/)
 
-5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/)
+5. [Zaman Serileri ve Bütünleştirme ile Basitleştirin ve Optimize Edin](/subgraphs/cookbook/timeseries/)
 
-6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/)
+6. [Hızlı Düzeltme Dağıtımı için Aşılama Kullanın](/subgraphs/cookbook/grafting-hotfix/)

From abf88d107f3ea0ed8b227661bb6f3e5200a95b64 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:14 -0500
Subject: [PATCH 0620/1534] New translations derivedfrom.mdx (Ukrainian)

---
 website/src/pages/uk/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/uk/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/uk/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/uk/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/uk/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From dae59c536336195d3663b730a13fb13a9dbbb56f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:15 -0500
Subject: [PATCH 0621/1534] New translations derivedfrom.mdx (Chinese Simplified)

---
 website/src/pages/zh/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/zh/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/zh/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/zh/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/zh/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From 8deceed17cda1dc54e07513ff5c610bfdbc1949a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:16 -0500
Subject: [PATCH 0622/1534] New translations derivedfrom.mdx (Urdu (Pakistan))

---
 website/src/pages/ur/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/ur/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ur/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/ur/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/ur/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From ac25781cbfc660dcbf860e097b81a7c8fc6e85e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:17 -0500
Subject: [PATCH 0623/1534] New translations derivedfrom.mdx (Vietnamese)

---
 website/src/pages/vi/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/vi/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/vi/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/vi/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/vi/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From ed43d4c491535cc06b852ea8f2ed56844f1b6d46 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:18 -0500
Subject: [PATCH 0624/1534] New translations derivedfrom.mdx (Marathi)

---
 website/src/pages/mr/subgraphs/cookbook/derivedfrom.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/mr/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/mr/subgraphs/cookbook/derivedfrom.mdx
index 22845a8d7dd2..7bbb8b50ecde 100644
--- a/website/src/pages/mr/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/mr/subgraphs/cookbook/derivedfrom.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
 ## TLDR

From f30925075b3bcfb897eb93f312d011b733b9c061 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:19 -0500
Subject: [PATCH 0625/1534] New translations derivedfrom.mdx (Hindi)

---
 website/src/pages/hi/subgraphs/cookbook/derivedfrom.mdx | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/website/src/pages/hi/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/hi/subgraphs/cookbook/derivedfrom.mdx
index 8d20dc0e36fe..28bd4f93450d 100644
--- a/website/src/pages/hi/subgraphs/cookbook/derivedfrom.mdx
+++ b/website/src/pages/hi/subgraphs/cookbook/derivedfrom.mdx
@@ -1,8 +1,9 @@
 ---
 title: Subgraph सर्वोत्तम प्रथा 2 - @derivedFrom का उपयोग करके अनुक्रमण और क्वेरी की प्रतिक्रियाशीलता में सुधार करें।
+sidebarTitle: "Subgraph Best Practice 2: Arrays with @derivedFrom"
 ---
 
-## संक्षेप में
+## TLDR
 
 आपके स्कीमा में ऐरे हजारों प्रविष्टियों से बढ़ने पर एक सबग्राफ के प्रदर्शन को वास्तव में धीमा कर सकते हैं। यदि संभव हो, तो @derivedFrom निर्देशिका का उपयोग करना चाहिए जब आप ऐरे का उपयोग कर रहे हों, क्योंकि यह बड़े ऐरे के निर्माण को रोकता है, हैंडलरों को सरल बनाता है और व्यक्तिगत संस्थाओं के आकार को कम करता है, जिससे अनुक्रमण गति और प्रश्न प्रदर्शन में महत्वपूर्ण सुधार होता है।
 
@@ -65,7 +66,7 @@
 
 2. हम एक रिवर्स लुकअप कर सकते हैं और किसी भी Comment को क्वेरी कर सकते हैं और देख सकते हैं कि यह किस पोस्ट से आया है।
 
-3. हम [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) का उपयोग कर सकते हैं ताकि हमारे Subgraph मैपिंग में वर्चुअल संबंधों से डेटा को सीधे एक्सेस और संपादित करने की क्षमता को अनलॉक किया जा सके।
+3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings.
 
 ## निष्कर्ष
 

From d791620485cdbcc738d0a0fcdc95573103ccf484 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:21 -0500
Subject: [PATCH 0626/1534] New translations enums.mdx (French)

---
 .../src/pages/fr/subgraphs/cookbook/enums.mdx | 148 +++++++++---------
 1 file changed, 74 insertions(+), 74 deletions(-)

diff --git a/website/src/pages/fr/subgraphs/cookbook/enums.mdx b/website/src/pages/fr/subgraphs/cookbook/enums.mdx
index ac68bdc05ade..5784cb991330 100644
--- a/website/src/pages/fr/subgraphs/cookbook/enums.mdx
+++ b/website/src/pages/fr/subgraphs/cookbook/enums.mdx
@@ -1,20 +1,20 @@
 ---
-title: Categorize NFT Marketplaces Using Enums
+title: Catégoriser les marketplaces NFT à l’aide d’Enums
 ---
 
-Use Enums to make your code cleaner and less error-prone. Here's a full example of using Enums on NFT marketplaces.
+Utilisez des Enums pour rendre votre code plus propre et moins sujet aux erreurs. Voici un exemple complet d'utilisation des Enums sur les marketplaces NFT.
 
-## What are Enums?
+## Que sont les Enums ?
 
-Enums, or enumeration types, are a specific data type that allows you to define a set of specific, allowed values.
+Les Enums, ou types d'énumération, sont un type de données spécifique qui vous permet de définir un ensemble de valeurs spécifiques et autorisées.
 
-### Example of Enums in Your Schema
+### Exemple d'Enums dans Votre Schéma
 
-If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned.
+Si vous construisez un subgraph pour suivre l'historique de propriété des tokens sur une marketplace, chaque token peut passer par différentes propriétés, telles que`OriginalOwner`, `SecondOwner`, et `ThirdOwner`. En utilisant des enums, vous pouvez définir ces propriétés spécifiques, garantissant que seules des valeurs prédéfinies sont utilisées.
 
-You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity.
+Vous pouvez définir des enums dans votre schéma et, une fois définis, vous pouvez utiliser la représentation en chaîne de caractères des valeurs enum pour définir un champ enum sur une entité.
 
-Here's what an enum definition might look like in your schema, based on the example above:
+Voici à quoi pourrait ressembler une définition d'enum dans votre schéma, basée sur l'exemple ci-dessus :
 
 ```graphql
 enum TokenStatus {
@@ -24,109 +24,109 @@ enum TokenStatus {
 }
 ```
 
-This means that when you use the `TokenStatus` type in your schema, you expect it to be exactly one of predefined values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`, ensuring consistency and validity.
+Ceci signifie que lorsque vous utilisez le type `TokenStatus` dans votre schéma, vous attendez qu'il soit exactement l'une des valeurs prédéfinies : `OriginalOwner`, `SecondOwner`, ou `ThirdOwner`, garantissant la cohérence et la validité des données.
 
-To learn more about enums, check out [Creating a Subgraph](/developing/creating-a-subgraph/#enums) and [GraphQL documentation](https://graphql.org/learn/schema/#enumeration-types).
+Pour en savoir plus sur les enums, consultez [Création d'un Subgraph](/developing/creating-a-subgraph/#enums) et [documentation GraphQL ](https://graphql.org/learn/schema/#enumeration-types).
 
-## Benefits of Using Enums
+## Avantages de l'Utilisation des Enums
 
-- **Clarity:** Enums provide meaningful names for values, making data easier to understand.
-- **Validation:** Enums enforce strict value definitions, preventing invalid data entries.
-- **Maintainability:** When you need to change or add new categories, enums allow you to do this in a focused manner.
+- **Clarté** : Les enums fournissent des noms significatifs pour les valeurs, rendant les données plus faciles à comprendre.
+- **Validation** : Les enums imposent des définitions de valeurs strictes, empêchant les entrées de données invalides.
+- **Maintenabilité** : Lorsque vous avez besoin de changer ou d'ajouter de nouvelles catégories, les enums vous permettent de le faire de manière ciblée.
 
-### Without Enums
+### Sans Enums
 
-If you choose to define the type as a string instead of using an Enum, your code might look like this:
+Si vous choisissez de définir le type comme une chaîne de caractères au lieu d'utiliser un Enum, votre code pourrait ressembler à ceci :
 
 ```graphql
 type Token @entity {
   id: ID!
   tokenId: BigInt!
-  owner: Bytes! # Owner of the token
-  tokenStatus: String! # String field to track token status
+  owner: Bytes! # Propriétaire du jeton
+  tokenStatus: String! # Champ de type chaîne pour suivre l'état du jeton
   timestamp: BigInt!
 }
 ```
 
-In this schema, `TokenStatus` is a simple string with no specific, allowed values.
+Dans ce schéma, `TokenStatus` est une simple chaîne de caractères sans valeurs spécifiques autorisées.
 
 #### Pourquoi est-ce un problème ?
 
-- There's no restriction of `TokenStatus` values, so any string can be accidentally assigned. This makes it hard to ensure that only valid statuses like `OriginalOwner`, `SecondOwner`, or `ThirdOwner` are set.
-- It's easy to make typos such as `Orgnalowner` instead of `OriginalOwner`, making the data and potential queries unreliable.
+- Il n'y a aucune restriction sur les valeurs de `TokenStatus` : n’importe quelle chaîne de caractères peut être affectée par inadvertance. Difficile donc de s'assurer que seules des valeurs valides comme comme `OriginalOwner`, `SecondOwner`, ou `ThirdOwner` soient utilisées.
+- Il est facile de faire des fautes de frappe comme `Orgnalowner` au lieu de `OriginalOwner`, rendant les données et les requêtes potentielles peu fiables.
 
-### With Enums
+### Avec Enums
 
-Instead of assigning free-form strings, you can define an enum for `TokenStatus` with specific values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`. Using an enum ensures only allowed values are used.
+Au lieu d'assigner des chaînes de caractères libres, vous pouvez définir un enum pour `TokenStatus` avec des valeurs spécifiques : `OriginalOwner`, `SecondOwner`, ou `ThirdOwner`. L'utilisation d'un enum garantit que seules les valeurs autorisées sont utilisées.
 
-Enums provide type safety, minimize typo risks, and ensure consistent and reliable results.
+Les Enums assurent la sécurité des types, minimisent les risques de fautes de frappe et garantissent des résultats cohérents et fiables.
 
-## Defining Enums for NFT Marketplaces
+## Définition des Enums pour les Marketplaces NFT
 
-> Note: The following guide uses the CryptoCoven NFT smart contract.
+> Note: Le guide suivant utilise le smart contract CryptoCoven NFT.
 
-To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema:
+Pour définir des enums pour les différents marketplaces où les NFTs sont échangés, utilisez ce qui suit dans votre schéma de subgraph :
 
 ```gql
-# Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint)
+# Enum pour les Marketplaces avec lesquelles le contrat CryptoCoven a interagi (probablement une vente ou un mint)
 enum Marketplace {
-  OpenSeaV1 # Represents when a CryptoCoven NFT is traded on the marketplace
-  OpenSeaV2 # Represents when a CryptoCoven NFT is traded on the OpenSeaV2 marketplace
-  SeaPort # Represents when a CryptoCoven NFT is traded on the SeaPort marketplace
-  LooksRare # Represents when a CryptoCoven NFT is traded on the LookRare marketplace
-  # ...and other marketplaces
+  OpenSeaV1 # Représente lorsque un NFT CryptoCoven est échangé sur la marketplace
+  OpenSeaV2 # Représente lorsque un NFT CryptoCoven est échangé sur la marketplace OpenSeaV2
+  SeaPort # Représente lorsque un NFT CryptoCoven est échangé sur la marketplace SeaPort
+  LooksRare # Représente lorsque un NFT CryptoCoven est échangé sur la marketplace LooksRare
+  # ...et d'autres marketplaces
 }
 ```
 
-## Using Enums for NFT Marketplaces
+## Utilisation des Enums pour les Marketplaces NFT
 
-Once defined, enums can be used throughout your subgraph to categorize transactions or events.
+Une fois définis, les enums peuvent être utilisés tout au long de votre subgraph pour catégoriser les transactions ou les événements.
 
-For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum.
+Par exemple, lors de la journalisation des ventes de NFT, vous pouvez spécifier la marketplace impliqué dans la transaction en utilisant l'enum.
 
-### Implementing a Function for NFT Marketplaces
+### Implémenter une Fonction pour les Marketplaces NFT
 
-Here's how you can implement a function to retrieve the marketplace name from the enum as a string:
+Voici comment vous pouvez implémenter une fonction pour récupérer le nom de la marketplace à partir de l'enum sous forme de chaîne de caractères :
 
 ```ts
 export function getMarketplaceName(marketplace: Marketplace): string {
-  // Using if-else statements to map the enum value to a string
+  // Utilisation des instructions if-else pour mapper la valeur de l'enum à une chaîne de caractères
   if (marketplace === Marketplace.OpenSeaV1) {
-    return 'OpenSeaV1' // If the marketplace is OpenSea, return its string representation
+    return 'OpenSeaV1' // Si le marketplace est OpenSea, renvoie sa représentation en chaîne de caractères
   } else if (marketplace === Marketplace.OpenSeaV2) {
     return 'OpenSeaV2'
  } else if (marketplace === Marketplace.SeaPort) {
-    return 'SeaPort' // If the marketplace is SeaPort, return its string representation
+    return 'SeaPort' // Si le marketplace est SeaPort, renvoie sa représentation en chaîne de caractères
  } else if (marketplace === Marketplace.LooksRare) {
-    return 'LooksRare' // If the marketplace is LooksRare, return its string representation
-    // ... and other market places
+    return 'LooksRare' // Si le marketplace est LooksRare, renvoie sa représentation en chaîne de caractères
+    // ... et d'autres marketplaces
  }
 }
 ```
 
-## Best Practices for Using Enums
+## Bonnes Pratiques pour l'Utilisation des Enums
 
-- **Consistent Naming:** Use clear, descriptive names for enum values to improve readability.
-- **Centralized Management:** Keep enums in a single file for consistency. This makes enums easier to update and ensures they are the single source of truth.
-- **Documentation:** Add comments to enum to clarify their purpose and usage.
+- **Nommer avec cohérence** : Utilisez des noms clairs et descriptifs pour les valeurs d'enum pour améliorer la lisibilité.
+- **Gestion Centralisée** : Gardez les enums dans un fichier unique pour plus de cohérence. Ainsi, il est plus simple de les mettre à jour et de garantir qu’ils sont votre unique source de vérité.
+- **Documentation** : Ajoutez des commentaires aux enums pour clarifier leur objectif et leur utilisation.
 
-## Using Enums in Queries
+## Utilisation des Enums dans les Requêtes
 
-Enums in queries help you improve data quality and make your results easier to interpret. They function as filters and response elements, ensuring consistency and reducing errors in marketplace values.
+Les enums dans les requêtes aident à améliorer la qualité des données et à rendre les résultats plus faciles à interpréter. Ils fonctionnent comme des filtres et des éléments de réponse, assurant la cohérence et réduisant les erreurs dans les valeurs des marketplaces.
 
-**Specifics**
+Spécificités
 
-- **Filtering with Enums:** Enums provide clear filters, allowing you to confidently include or exclude specific marketplaces.
-- **Enums in Responses:** Enums guarantee that only recognized marketplace names are returned, making the results standardized and accurate.
+- **Filtrer avec des Enums**: Les Enums offrent des filtres clairs, vous permettant d’inclure ou d’exclure facilement des marketplaces spécifiques.
+- **Enums dans les Réponses**: Les Enums garantissent que seules des valeurs de marketplace reconnues sont renvoyées, ce qui rend les résultats standardisés et précis.
-### Sample Queries
+### Exemples de requêtes

-#### Query 1: Account With The Highest NFT Marketplace Interactions
+#### Requête 1 : Compte avec le Plus d'Interactions sur les Marketplaces NFT

-This query does the following:
+Cette requête fait ce qui suit :

-- It finds the account with the highest unique NFT marketplace interactions, which is great for analyzing cross-marketplace activity.
-- The marketplaces field uses the marketplace enum, ensuring consistent and validated marketplace values in the response.
+- Elle trouve le compte avec le plus grand nombre unique d'interactions sur les marketplaces NFT, ce qui est excellent pour analyser l'activité inter-marketplaces.
+- Le champ marketplaces utilise l'enum marketplace, garantissant des valeurs de marketplace cohérentes et validées dans la réponse.

 ```gql
 {
@@ -137,15 +137,15 @@ This query does the following:
     totalSpent
     uniqueMarketplacesCount
     marketplaces {
-      marketplace # This field returns the enum value representing the marketplace
+      marketplace # Ce champ retourne la valeur enum représentant la marketplace
     }
   }
 }
 ```

-#### Returns
+#### Résultats

-This response provides account details and a list of unique marketplace interactions with enum values for standardized clarity:
+Cette réponse fournit les détails du compte et une liste des interactions uniques sur les marketplaces avec des valeurs enum pour une clarté standardisée :

 ```gql
 {
@@ -186,12 +186,12 @@ This response provides account details and a list of unique marketplace interact
     }
   }
 }
 ```

-#### Query 2: Most Active Marketplace for CryptoCoven transactions
+#### Requête 2 : Marketplace la Plus Active pour les Transactions CryptoCoven

-This query does the following:
+Cette requête fait ce qui suit :

-- It identifies the marketplace with the highest volume of CryptoCoven transactions.
-- It uses the marketplace enum to ensure that only valid marketplace types appear in the response, adding reliability and consistency to your data.
+- Elle identifie la marketplace avec le plus grand volume de transactions CryptoCoven.
+- Elle utilise l'enum marketplace pour s'assurer que seuls les types de marketplace valides apparaissent dans la réponse, ajoutant fiabilité et cohérence à vos données.

 ```gql
 {
@@ -202,9 +202,9 @@ This query does the following:
   }
 }
 ```

-#### Result 2
+#### Résultat 2

-The expected response includes the marketplace and the corresponding transaction count, using the enum to indicate the marketplace type:
+La réponse attendue inclut la marketplace et le nombre de transactions correspondant, en utilisant l'enum pour indiquer le type de marketplace :

 ```gql
 {
@@ -219,12 +219,12 @@ The expected response includes the marketplace and the corresponding transaction
   }
 }
 ```

-#### Query 3: Marketplace Interactions with High Transaction Counts
+#### Requête 3 : Interactions sur les marketplaces avec un haut volume de transactions

-This query does the following:
+Cette requête fait ce qui suit :

-- It retrieves the top four marketplaces with over 100 transactions, excluding "Unknown" marketplaces.
-- It uses enums as filters to ensure that only valid marketplace types are included, increasing accuracy.
+- Elle récupère les quatre principales marketplaces avec plus de 100 transactions, en excluant les marketplaces "Unknown".
+- Elle utilise des enums comme filtres pour s'assurer que seuls les types de marketplace valides sont inclus, augmentant ainsi la précision.
 ```gql
 {
@@ -240,9 +240,9 @@ This query does the following:
   }
 }
 ```

-#### Result 3
+#### Résultat 3

-Expected output includes the marketplaces that meet the criteria, each represented by an enum value:
+La sortie attendue inclut les marketplaces qui répondent aux critères, chacune représentée par une valeur enum :

 ```gql
 {
@@ -269,6 +269,6 @@ Expected output includes the marketplaces that meet the criteria, each represent
   }
 }
 ```

-## Ressources additionnelles
+## Ressources supplémentaires

-For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums).
+Pour des informations supplémentaires, consultez le [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums) de ce guide.

From 18875c475ee7410117ff8803674739f308d393b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:23 -0500
Subject: [PATCH 0627/1534] New translations enums.mdx (German)

---
 website/src/pages/de/subgraphs/cookbook/enums.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/src/pages/de/subgraphs/cookbook/enums.mdx b/website/src/pages/de/subgraphs/cookbook/enums.mdx
index 8db81193d949..0b2fe58b4e34 100644
--- a/website/src/pages/de/subgraphs/cookbook/enums.mdx
+++ b/website/src/pages/de/subgraphs/cookbook/enums.mdx
@@ -269,6 +269,6 @@ Expected output includes the marketplaces that meet the criteria, each represent
 }
 ```

-## Additional Resources
+## Zusätzliche Ressourcen

 For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums).

From a9c365cbdab134cb546247889c61fddc0eb6fa66 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:28 -0500
Subject: [PATCH 0628/1534] New translations enums.mdx (Portuguese)

---
 .../src/pages/pt/subgraphs/cookbook/enums.mdx | 146 +++++++++---------
 1 file changed, 73 insertions(+), 73 deletions(-)

diff --git a/website/src/pages/pt/subgraphs/cookbook/enums.mdx b/website/src/pages/pt/subgraphs/cookbook/enums.mdx
index ddfda1aae5bd..d76ea4c23c4b 100644
--- a/website/src/pages/pt/subgraphs/cookbook/enums.mdx
+++ b/website/src/pages/pt/subgraphs/cookbook/enums.mdx
@@ -1,20 +1,20 @@
 ---
-title: Categorize NFT Marketplaces Using Enums
+title: Categorize Marketplaces de NFT com Enums
 ---

-Use Enums to make your code cleaner and less error-prone. Here's a full example of using Enums on NFT marketplaces.
+Use Enums para deixar o seu código mais limpo e menos vulnerável a erros. Veja um exemplo completo do uso de Enums em marketplaces de NFT.

-## What are Enums?
+## O que são Enums?

-Enums, or enumeration types, are a specific data type that allows you to define a set of specific, allowed values.
+Enums, ou tipos de enumeração, são um tipo de dados específico que permite definir um conjunto de valores específicos permitidos.

-### Example of Enums in Your Schema
+### Exemplo de Enums no seu Schema

-If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned.
+Se estiver a construir um subgraph para rastrear o histórico de posse de tokens em um marketplace, cada token pode passar por posses diferentes, como `OriginalOwner`, `SecondOwner`, e `ThirdOwner`.
Ao usar enums, é possível definir essas posses específicas, assim garantindo que só sejam atribuídos valores predefinidos.

-You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity.
+É possível definir enums no seu schema; assim definidos, a representação de string dos valores de enum pode ser usada para configurar um campo de enum numa entidade.

-Here's what an enum definition might look like in your schema, based on the example above:
+Com base no exemplo acima, uma definição de enum no seu schema pode ficar assim:

 ```graphql
 enum TokenStatus {
@@ -24,109 +24,109 @@ enum TokenStatus {
 }
 ```

-This means that when you use the `TokenStatus` type in your schema, you expect it to be exactly one of predefined values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`, ensuring consistency and validity.
+Ou seja, quando usar o tipo `TokenStatus` no seu schema, a expectativa é de que seja exatamente um dos valores predefinidos: `OriginalOwner`, `SecondOwner`, ou `ThirdOwner`, assim garantindo a consistência e validade.

-To learn more about enums, check out [Creating a Subgraph](/developing/creating-a-subgraph/#enums) and [GraphQL documentation](https://graphql.org/learn/schema/#enumeration-types).
+Para saber mais sobre enums, veja [Como Criar um Subgraph](/developing/creating-a-subgraph/#enums) e a [documentação da GraphQL](https://graphql.org/learn/schema/#enumeration-types).

-## Benefits of Using Enums
+## Vantagens de Usar Enums

-- **Clarity:** Enums provide meaningful names for values, making data easier to understand.
-- **Validation:** Enums enforce strict value definitions, preventing invalid data entries.
-- **Maintainability:** When you need to change or add new categories, enums allow you to do this in a focused manner.
+- **Clareza**: Enums dão nomes significativos a valores, facilitando a compreensão dos dados.
+- **Validação**: Enums aplicam definições rígidas de valor, assim evitando entradas inválidas de dados.
+- **Manutenibilidade:** Enums permitem mudar ou adicionar novas categorias de maneira focada.

-### Without Enums
+### Sem Enums

-If you choose to define the type as a string instead of using an Enum, your code might look like this:
+Se escolher definir o tipo como um string em vez de usar um Enum, seu código poderá ficar assim:

 ```graphql
 type Token @entity {
   id: ID!
   tokenId: BigInt!
-  owner: Bytes! # Owner of the token
-  tokenStatus: String! # String field to track token status
+  owner: Bytes! # Proprietário do token
+  tokenStatus: String! # Campo de string para rastrear o estado do token
   timestamp: BigInt!
 }
 ```

-In this schema, `TokenStatus` is a simple string with no specific, allowed values.
+Neste schema, `TokenStatus` é um string simples sem valores permitidos específicos.

 #### Por que isto é um problema?

-- There's no restriction of `TokenStatus` values, so any string can be accidentally assigned. This makes it hard to ensure that only valid statuses like `OriginalOwner`, `SecondOwner`, or `ThirdOwner` are set.
-- It's easy to make typos such as `Orgnalowner` instead of `OriginalOwner`, making the data and potential queries unreliable.
+- Não há restrições sobre valores `TokenStatus`, então qualquer string pode ser atribuído por acidente. Assim, fica difícil garantir que só estados válidos — como `OriginalOwner`, `SecondOwner`, ou `ThirdOwner` — serão configurados.
+- É fácil fazer erros de digitação, como `Orgnalowner` em vez de `OriginalOwner`, o que tornaria os dados e as possíveis queries pouco confiáveis.

-### With Enums
+### Com Enums

-Instead of assigning free-form strings, you can define an enum for `TokenStatus` with specific values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`. Using an enum ensures only allowed values are used.
+Em vez de nomear strings livres, é possível definir um enum para `TokenStatus` com valores específicos: `OriginalOwner`, `SecondOwner`, ou `ThirdOwner`. Usar um enum garante o uso de valores permitidos, e mais nenhum.

-Enums provide type safety, minimize typo risks, and ensure consistent and reliable results.
+Enums proveem segurança de tipos, minimizam os riscos de erros de digitação, e garantem resultados consistentes e confiáveis.

-## Defining Enums for NFT Marketplaces
+## Como Definir Enums para Marketplaces de NFT

-> Note: The following guide uses the CryptoCoven NFT smart contract.
+> Nota: o guia a seguir usa o contrato inteligente de NFTs CryptoCoven.

-To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema:
+Para definir enums para os vários marketplaces com apoio a troca de NFTs, use o seguinte no seu schema de subgraph:

 ```gql
-# Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint)
+# Enum para Marketplaces com que o contrato CryptoCoven interagiu (provavelmente Troca/Mint)
 enum Marketplace {
-  OpenSeaV1 # Represents when a CryptoCoven NFT is traded on the marketplace
-  OpenSeaV2 # Represents when a CryptoCoven NFT is traded on the OpenSeaV2 marketplace
-  SeaPort # Represents when a CryptoCoven NFT is traded on the SeaPort marketplace
-  LooksRare # Represents when a CryptoCoven NFT is traded on the LookRare marketplace
-  # ...and other marketplaces
+  OpenSeaV1 # Representa o comércio de um NFT CryptoCoven no marketplace
+  OpenSeaV2 # Representa o comércio de um NFT CryptoCoven no marketplace OpenSeaV2
+  SeaPort # Representa o comércio de um NFT CryptoCoven no marketplace SeaPort
+  LooksRare # Representa o comércio de um NFT CryptoCoven no marketplace LooksRare
+  # ...e outros marketplaces
 }
 ```

-## Using Enums for NFT Marketplaces
+## Como Usar Enums para Marketplaces de NFT

-Once defined, enums can be used throughout your subgraph to categorize transactions or events.
+Quando definidos, enums podem ser usados no seu subgraph para categorizar transações ou eventos.

-For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum.
+Por exemplo: ao registrar vendas de NFT, é possível usar o enum para especificar o marketplace envolvido na transação.
-### Implementing a Function for NFT Marketplaces +### Como Implementar uma Função para Marketplaces de NFT -Here's how you can implement a function to retrieve the marketplace name from the enum as a string: +Veja como implementar uma função para resgatar o nome de um marketplace do enum, como string: ```ts export function getMarketplaceName(marketplace: Marketplace): string { - // Using if-else statements to map the enum value to a string + // Usando comandos if-else para mapear o valor do enum para um string if (marketplace === Marketplace.OpenSeaV1) { - return 'OpenSeaV1' // If the marketplace is OpenSea, return its string representation + return 'OpenSeaV1' // Se o marketplace for OpenSea, retornar sua representação de string } else if (marketplace === Marketplace.OpenSeaV2) { return 'OpenSeaV2' } else if (marketplace === Marketplace.SeaPort) { - return 'SeaPort' // If the marketplace is SeaPort, return its string representation + return 'SeaPort' // Se o marketplace for SeaPort, retornar sua representação de string } else if (marketplace === Marketplace.LooksRare) { - return 'LooksRare' // If the marketplace is LooksRare, return its string representation - // ... and other market places + return 'LooksRare' // Se o marketplace for LooksRare, retornar sua representação de string + // ... e outros marketplaces } } ``` -## Best Practices for Using Enums +## Melhores Práticas para o Uso de Enums -- **Consistent Naming:** Use clear, descriptive names for enum values to improve readability. -- **Centralized Management:** Keep enums in a single file for consistency. This makes enums easier to update and ensures they are the single source of truth. -- **Documentation:** Add comments to enum to clarify their purpose and usage. +- **Nomes Consistentes**: Deixe o seu código mais legível; use nomes mais claros e descritivos para valores de enum. +- **Gestão Centralizada:** Mantenha enums num único arquivo para ficar mais consistente. Assim, os enums ficam mais fáceis de atualizar, garantindo uma fonte única e verdadeira de dados. +- **Documentação:** Adicione comentários a enums para esclarecer o seu propósito e uso. -## Using Enums in Queries +## Como Usar Enums em Queries -Enums in queries help you improve data quality and make your results easier to interpret. They function as filters and response elements, ensuring consistency and reducing errors in marketplace values. +Enums em queries ajudam a melhorar a qualidade de dados e deixam os seus resultados mais fáceis de interpretar. Eles funcionam como filtros e elementos de resposta, assim garantindo a consistência e reduzindo erros em valores de marketplace. -**Specifics** +**Detalhes** -- **Filtering with Enums:** Enums provide clear filters, allowing you to confidently include or exclude specific marketplaces. -- **Enums in Responses:** Enums guarantee that only recognized marketplace names are returned, making the results standardized and accurate. +- **Filtros com Enums:** Enums fornecem filtros claros, permitindo a inclusão ou exclusão clara de marketplaces específicos. +- **Enums em Respostas:** Enums garantem que só marketplaces reconhecidos sejam retornados, trazendo resultados consistentes e precisos. 
-### Sample Queries +### Exemplos de Query -#### Query 1: Account With The Highest NFT Marketplace Interactions +#### Query 1: Conta com Mais Interações de Marketplace em NFT -This query does the following: +Este query faz o seguinte: -- It finds the account with the highest unique NFT marketplace interactions, which is great for analyzing cross-marketplace activity. -- The marketplaces field uses the marketplace enum, ensuring consistent and validated marketplace values in the response. +- Encontra a conta com mais interações únicas com marketplaces de NFT, ótimo para analisar atividade entre marketplaces. +- O campo de marketplaces usa o enum de marketplace, garantindo valores consistentes e validados na resposta. ```gql { @@ -137,15 +137,15 @@ This query does the following: totalSpent uniqueMarketplacesCount marketplaces { - marketplace # This field returns the enum value representing the marketplace + marketplace # Este campo retorna o valor de enum representando o marketplace } } } ``` -#### Returns +#### Respostas -This response provides account details and a list of unique marketplace interactions with enum values for standardized clarity: +Esta resposta fornece detalhes de conta e uma lista de interações singulares de marketplace com valores de enum, para mais clareza: ```gql { @@ -186,12 +186,12 @@ This response provides account details and a list of unique marketplace interact } ``` -#### Query 2: Most Active Marketplace for CryptoCoven transactions +#### Query 2: Marketplace mais ativo para transações do CryptoCoven -This query does the following: +Este query faz o seguinte: -- It identifies the marketplace with the highest volume of CryptoCoven transactions. -- It uses the marketplace enum to ensure that only valid marketplace types appear in the response, adding reliability and consistency to your data. +- Identifica o marketplace com maior volume de transações do CryptoCoven. +- Usa o enum de marketplace para garantir que só tipos válidos de marketplace apareçam na resposta, deixando os seus dados mais confiáveis e consistentes. ```gql { @@ -202,9 +202,9 @@ This query does the following: } ``` -#### Result 2 +#### Resultado 2 -The expected response includes the marketplace and the corresponding transaction count, using the enum to indicate the marketplace type: +A resposta esperada inclui o marketplace e a contagem correspondente de transações, usando o enum para indicar o tipo de marketplace: ```gql { @@ -219,12 +219,12 @@ The expected response includes the marketplace and the corresponding transaction } ``` -#### Query 3: Marketplace Interactions with High Transaction Counts +#### Query 3: Interações de Marketplace com Altos Números de Transação -This query does the following: +Este query faz o seguinte: -- It retrieves the top four marketplaces with over 100 transactions, excluding "Unknown" marketplaces. -- It uses enums as filters to ensure that only valid marketplace types are included, increasing accuracy. +- Resgata os quatro marketplaces com maior número de transações acima de 100, excluindo marketplaces "desconhecidos". +- Usa enums como filtros para garantir que só tipos válidos de marketplace sejam incluídos, deixando a resposta mais precisa. 
```gql { @@ -240,9 +240,9 @@ This query does the following: } ``` -#### Result 3 +#### Resultado 3 -Expected output includes the marketplaces that meet the criteria, each represented by an enum value: +O retorno esperado inclui os marketplaces que cumprem os critérios, cada um representado por um valor enum: ```gql { @@ -271,4 +271,4 @@ Expected output includes the marketplaces that meet the criteria, each represent ## Outros Recursos -For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums). +Para mais informações, veja o [repositório](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums) deste guia. From 026d85a4d6b434fae596a8a83c451d87ebb3c1b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:29 -0500 Subject: [PATCH 0629/1534] New translations enums.mdx (Russian) --- .../src/pages/ru/subgraphs/cookbook/enums.mdx | 145 +++++++++--------- 1 file changed, 73 insertions(+), 72 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/enums.mdx b/website/src/pages/ru/subgraphs/cookbook/enums.mdx index d2e724ae5f8e..be99c7607fbc 100644 --- a/website/src/pages/ru/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/enums.mdx @@ -1,20 +1,21 @@ --- -title: Categorize NFT Marketplaces Using Enums +title: Категоризация маркетплейсов NFT с использованием Enums (перечислений) --- -Use Enums to make your code cleaner and less error-prone. Here's a full example of using Enums on NFT marketplaces. +Используйте Enums (перечисления), чтобы сделать Ваш код чище и уменьшить вероятность ошибок. Вот полный пример использования перечислений для маркетплейсов NFT. -## What are Enums? +## Что такое Enums (перечисления)? -Enums, or enumeration types, are a specific data type that allows you to define a set of specific, allowed values. +Перечисления (или типы перечислений) — это особый тип данных, который позволяет определить набор конкретных допустимых значений. -### Example of Enums in Your Schema +### Пример использования Enums (перечислений) в Вашей схеме -If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned. +Если вы создаете субграф для отслеживания истории владения токенами на рынке, каждый токен может переходить через разные стадии владения, такие как `OriginalOwner` (Первоначальный Владелец), `SecondOwner` (Второй Владелец) и `ThirdOwner` (Третий +Владелец). Используя перечисления (enums), Вы можете определить эти конкретные стадии владения, обеспечивая присвоение только заранее определенных значений. -You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity. +Вы можете определить перечисления (enums) в своей схеме, и после их определения Вы можете использовать строковое представление значений перечислений для установки значения поля перечисления в объекты. 
-Here's what an enum definition might look like in your schema, based on the example above: +Вот как может выглядеть определение перечисления (enum) в Вашей схеме, исходя из приведенного выше примера: ```graphql enum TokenStatus { @@ -24,19 +25,19 @@ enum TokenStatus { } ``` -This means that when you use the `TokenStatus` type in your schema, you expect it to be exactly one of predefined values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`, ensuring consistency and validity. +Это означает, что когда Вы используете тип `TokenStatus` в своей схеме, Вы ожидаете, что он будет иметь одно из заранее определенных значений: `OriginalOwner` (Первоначальный Владелец), `SecondOwner` (Второй Владелец) или `ThirdOwner` (Третий Владелец), что обеспечивает согласованность и корректность данных. -To learn more about enums, check out [Creating a Subgraph](/developing/creating-a-subgraph/#enums) and [GraphQL documentation](https://graphql.org/learn/schema/#enumeration-types). +Чтобы узнать больше о перечислениях (Enums), ознакомьтесь с разделом [Создание субграфа](/developing/creating-a-subgraph/#enums) и с [документацией GraphQL](https://graphql.org/learn/schema/#enumeration-types). -## Benefits of Using Enums +## Преимущества использования перечислений (Enums) -- **Clarity:** Enums provide meaningful names for values, making data easier to understand. -- **Validation:** Enums enforce strict value definitions, preventing invalid data entries. -- **Maintainability:** When you need to change or add new categories, enums allow you to do this in a focused manner. +- **Ясность:** Перечисления предоставляют значимые имена для значений, что делает данные более понятными. +- **Валидация:** Перечисления обеспечивают строгие определения значений, предотвращая ввод недопустимых данных. +- **Поддерживаемость:** Когда Вам нужно изменить или добавить новые категории, перечисления позволяют сделать это целенаправленно и удобно. -### Without Enums +### Без перечислений (Enums) -If you choose to define the type as a string instead of using an Enum, your code might look like this: +Если Вы решите определить тип как строку вместо использования перечисления (Enum), Ваш код может выглядеть следующим образом: ```graphql type Token @entity { @@ -48,85 +49,85 @@ type Token @entity { } ``` -In this schema, `TokenStatus` is a simple string with no specific, allowed values. +В этой схеме `TokenStatus` является простой строкой без конкретных и допустимых значений. #### Почему это является проблемой? -- There's no restriction of `TokenStatus` values, so any string can be accidentally assigned. This makes it hard to ensure that only valid statuses like `OriginalOwner`, `SecondOwner`, or `ThirdOwner` are set. -- It's easy to make typos such as `Orgnalowner` instead of `OriginalOwner`, making the data and potential queries unreliable. +- Нет никаких ограничений на значения `TokenStatus`, поэтому любое строковое значение может быть назначено случайно. Это усложняет обеспечение того, что устанавливаются только допустимые статусы, такие как `OriginalOwner` (Первоначальный Владелец), `SecondOwner` (Второй Владелец) или `ThirdOwner` (Третий Владелец). +- Легко допустить опечатку, например, `Orgnalowner` вместо `OriginalOwner`, что делает данные и потенциальные запросы ненадежными. -### With Enums +### С перечислениями (Enums) -Instead of assigning free-form strings, you can define an enum for `TokenStatus` with specific values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`. Using an enum ensures only allowed values are used. 
+Вместо присвоения строк произвольной формы Вы можете определить перечисление (Enum) для `TokenStatus` с конкретными значениями: `OriginalOwner`, `SecondOwner` или `ThirdOwner`. Использование перечисления гарантирует, что используются только допустимые значения. -Enums provide type safety, minimize typo risks, and ensure consistent and reliable results. +Перечисления обеспечивают безопасность типов, минимизируют риск опечаток и гарантируют согласованные и надежные результаты. -## Defining Enums for NFT Marketplaces +## Определение перечислений (Enums) для Маркетплейсов NFT -> Note: The following guide uses the CryptoCoven NFT smart contract. +> Примечание: Следующее руководство использует смарт-контракт NFT CryptoCoven. -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +Чтобы определить перечисления для различных маркетплейсов, на которых торгуются NFT, используйте следующее в своей схеме субграфа: ```gql -# Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) +# Перечисление для маркетплейсов, с которыми взаимодействовал смарт-контракт CryptoCoven (вероятно, торговля или минт) enum Marketplace { - OpenSeaV1 # Represents when a CryptoCoven NFT is traded on the marketplace - OpenSeaV2 # Represents when a CryptoCoven NFT is traded on the OpenSeaV2 marketplace - SeaPort # Represents when a CryptoCoven NFT is traded on the SeaPort marketplace - LooksRare # Represents when a CryptoCoven NFT is traded on the LookRare marketplace - # ...and other marketplaces + OpenSeaV1 # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе OpenSeaV1 + OpenSeaV2 # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе OpenSeaV2 + SeaPort # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе SeaPort + LooksRare # Представляет случай, когда NFT CryptoCoven торгуется на маркетплейсе LooksRare + # ...и другие рынки } ``` -## Using Enums for NFT Marketplaces +## Использование перечислений (Enums) для Маркетплейсов NFT -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +После определения перечисления (enums) могут использоваться в Вашем субграфе для категоризации транзакций или событий. -For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. +Например, при регистрации продаж NFT можно указать маркетплейс, на котором произошла сделка, используя перечисление. 
-### Implementing a Function for NFT Marketplaces +### Реализация функции для маркетплейсов NFT -Here's how you can implement a function to retrieve the marketplace name from the enum as a string: +Вот как можно реализовать функцию для получения названия маркетплейса из перечисления (enum) в виде строки: ```ts export function getMarketplaceName(marketplace: Marketplace): string { - // Using if-else statements to map the enum value to a string + // Используем операторы if-else для сопоставления значения перечисления со строкой if (marketplace === Marketplace.OpenSeaV1) { - return 'OpenSeaV1' // If the marketplace is OpenSea, return its string representation + return 'OpenSeaV1' // Если маркетплейс OpenSea, возвращаем его строковое представление } else if (marketplace === Marketplace.OpenSeaV2) { return 'OpenSeaV2' } else if (marketplace === Marketplace.SeaPort) { - return 'SeaPort' // If the marketplace is SeaPort, return its string representation + return 'SeaPort' // Если маркетплейс SeaPort, возвращаем его строковое представление } else if (marketplace === Marketplace.LooksRare) { - return 'LooksRare' // If the marketplace is LooksRare, return its string representation - // ... and other market places + return 'LooksRare' // Если маркетплейс LooksRare, возвращаем его строковое представление + // ... и другие маркетплейсы } } ``` -## Best Practices for Using Enums +## Лучшие практики использования перечислений (Enums) -- **Consistent Naming:** Use clear, descriptive names for enum values to improve readability. -- **Centralized Management:** Keep enums in a single file for consistency. This makes enums easier to update and ensures they are the single source of truth. -- **Documentation:** Add comments to enum to clarify their purpose and usage. +- **Согласованность в наименованиях:** Используйте четкие, описательные названия для значений перечислений, чтобы улучшить читаемость кода. +- **Централизованное управление:** Храните перечисления в одном файле для обеспечения согласованности. Это облегчает обновление перечислений и гарантирует, что они являются единственным источником достоверной информации. +- **Документация:** Добавляйте комментарии к перечислениям, чтобы прояснить их назначение и использование. -## Using Enums in Queries +## Использование перечислений (Enums) в запросах -Enums in queries help you improve data quality and make your results easier to interpret. They function as filters and response elements, ensuring consistency and reducing errors in marketplace values. +Перечисления в запросах помогают улучшить качество данных и делают результаты более понятными. Они функционируют как фильтры и элементы ответа, обеспечивая согласованность и уменьшая ошибки в значениях маркетплейса. -**Specifics** +**Особенности** -- **Filtering with Enums:** Enums provide clear filters, allowing you to confidently include or exclude specific marketplaces. -- **Enums in Responses:** Enums guarantee that only recognized marketplace names are returned, making the results standardized and accurate. +- **Фильтрация с помощью перечислений:** Перечисления предоставляют четкие фильтры, позволяя уверенно включать или исключать конкретные маркетплейсы. +- **Перечисления в ответах:** Перечисления гарантируют, что возвращаются только признанные названия маркетплейсов, делая результаты стандартизированными и точными. 
-### Sample Queries +### Пример запросов -#### Query 1: Account With The Highest NFT Marketplace Interactions +#### Запрос 1: Аккаунт с наибольшим количеством взаимодействий на маркетплейсе NFT -This query does the following: +Этот запрос выполняет следующие действия: -- It finds the account with the highest unique NFT marketplace interactions, which is great for analyzing cross-marketplace activity. -- The marketplaces field uses the marketplace enum, ensuring consistent and validated marketplace values in the response. +- Он находит аккаунт с наибольшим количеством уникальных взаимодействий с маркетплейсами NFT, что полезно для анализа активности на разных маркетплейсах. +- Поле маркетплейсов использует перечисление marketplace, что обеспечивает согласованность и валидацию значений маркетплейсов в ответе. ```gql { @@ -137,15 +138,15 @@ This query does the following: totalSpent uniqueMarketplacesCount marketplaces { - marketplace # This field returns the enum value representing the marketplace + marketplace # Это поле возвращает значение перечисления, представляющее маркетплейс } } } ``` -#### Returns +#### Результаты -This response provides account details and a list of unique marketplace interactions with enum values for standardized clarity: +Данный ответ включает информацию об аккаунте и перечень уникальных взаимодействий с маркетплейсом, где используются значения перечислений (enum) для обеспечения единообразной ясности: ```gql { @@ -186,12 +187,12 @@ This response provides account details and a list of unique marketplace interact } ``` -#### Query 2: Most Active Marketplace for CryptoCoven transactions +#### Запрос 2: Наиболее активный маркетплейс для транзакций CryptoCoven -This query does the following: +Этот запрос выполняет следующие действия: -- It identifies the marketplace with the highest volume of CryptoCoven transactions. -- It uses the marketplace enum to ensure that only valid marketplace types appear in the response, adding reliability and consistency to your data. +- Он определяет маркетплейс с наибольшим объемом транзакций CryptoCoven. +- Он использует перечисление marketplace, чтобы гарантировать, что в ответе будут только допустимые типы маркетплейсов, что повышает надежность и согласованность ваших данных. ```gql { @@ -202,9 +203,9 @@ This query does the following: } ``` -#### Result 2 +#### Результат 2 -The expected response includes the marketplace and the corresponding transaction count, using the enum to indicate the marketplace type: +Ожидаемый ответ включает маркетплейс и соответствующее количество транзакций, используя перечисление для указания типа маркетплейса: ```gql { @@ -219,12 +220,12 @@ The expected response includes the marketplace and the corresponding transaction } ``` -#### Query 3: Marketplace Interactions with High Transaction Counts +#### Запрос 3: Взаимодействия на маркетплейсе с высоким количеством транзакций -This query does the following: +Этот запрос выполняет следующие действия: -- It retrieves the top four marketplaces with over 100 transactions, excluding "Unknown" marketplaces. -- It uses enums as filters to ensure that only valid marketplace types are included, increasing accuracy. +- Он извлекает четыре самых активных маркетплейса с более чем 100 транзакциями, исключая маркетплейсы с типом "Unknown". +- Он использует перечисления в качестве фильтров, чтобы гарантировать, что включены только допустимые типы маркетплейсов, что повышает точность выполнения запроса. 
 ```gql
 {
@@ -240,9 +240,9 @@ This query does the following:
   }
 }
 ```

-#### Result 3
+#### Результат 3

-Expected output includes the marketplaces that meet the criteria, each represented by an enum value:
+Ожидаемый вывод включает маркетплейсы, которые соответствуют критериям, каждый из которых представлен значением перечисления:

 ```gql
 {
@@ -269,6 +269,6 @@ Expected output includes the marketplaces that meet the criteria, each represent
   }
 }
 ```

-## Дополнительные источники
+## Дополнительные ресурсы

-For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums).
+Дополнительную информацию можно найти в [репозитории](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums) этого руководства.

From 771bd34b7f83c4c6cc48572699103fbc5193f1fa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:31 -0500
Subject: [PATCH 0630/1534] New translations enums.mdx (Turkish)

---
 .../src/pages/tr/subgraphs/cookbook/enums.mdx | 146 +++++++++---------
 1 file changed, 73 insertions(+), 73 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/cookbook/enums.mdx b/website/src/pages/tr/subgraphs/cookbook/enums.mdx
index 99d1a01a02c0..6ec3ee35af1b 100644
--- a/website/src/pages/tr/subgraphs/cookbook/enums.mdx
+++ b/website/src/pages/tr/subgraphs/cookbook/enums.mdx
@@ -1,20 +1,20 @@
 ---
-title: Categorize NFT Marketplaces Using Enums
+title: NFT Pazar Yerlerini Enums Kullanarak Kategorize Etme
 ---

-Use Enums to make your code cleaner and less error-prone. Here's a full example of using Enums on NFT marketplaces.
+Kodu daha temiz yapmak ve hata yapma riskini azaltmak için Enums kullanın. İşte NFT pazar yerlerinde Enums kullanımına bir örnek.

-## What are Enums?
+## Enum'lar Nedir?

-Enums, or enumeration types, are a specific data type that allows you to define a set of specific, allowed values.
+Enum'lar veya numaralandırma türleri, bir dizi izin verilen değeri tanımlamanıza olanak tanıyan belirli bir veri türüdür.

-### Example of Enums in Your Schema
+### Şemanızda Enum Örnekleri

-If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned.
+Bir pazar yerinde token sahiplik geçmişini izlemek için bir subgraph oluşturuyorsanız, her token `OriginalOwner`, `SecondOwner` ve `ThirdOwner` gibi farklı sahipliklerden geçebilir. Enum'ları kullanarak, bu belirli sahiplikleri tanımlayabilir ve yalnızca önceden tanımlanmış değerlerin atanmasını sağlayabilirsiniz.

-You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity.
+Şemanızda enum tanımlayabilir ve bir kez tanımlandığında, bir varlık üzerinde bir enum alanı ayarlamak için enum değerlerinin dizi (string) gösterimini kullanabilirsiniz.

-Here's what an enum definition might look like in your schema, based on the example above:
+İşte yukarıdaki örneğe dayanarak, şemanızda bir enum tanımı şöyle görünebilir:

 ```graphql
 enum TokenStatus {
@@ -24,109 +24,109 @@ enum TokenStatus {
 }
 ```

-This means that when you use the `TokenStatus` type in your schema, you expect it to be exactly one of predefined values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`, ensuring consistency and validity.
+Bu, şemanızda `TokenStatus` türünü kullandığınızda, bunun tanımlı değerlerden tam olarak biri olmasını beklediğiniz anlamına gelir: `OriginalOwner`, `SecondOwner` veya `ThirdOwner`. Böylece tutarlılık ve geçerlilik sağlanmış olur. -To learn more about enums, check out [Creating a Subgraph](/developing/creating-a-subgraph/#enums) and [GraphQL documentation](https://graphql.org/learn/schema/#enumeration-types). +Enum'lar hakkında daha fazla bilgi edinmek için [Subgraph Oluşturma](/developing/creating-a-subgraph/#enums) ve [GraphQL dokümantasyonu](https://graphql.org/learn/schema/#enumeration-types) kaynaklarına göz atın. -## Benefits of Using Enums +## Enum Kullanmanın Faydaları -- **Clarity:** Enums provide meaningful names for values, making data easier to understand. -- **Validation:** Enums enforce strict value definitions, preventing invalid data entries. -- **Maintainability:** When you need to change or add new categories, enums allow you to do this in a focused manner. +- **Anlaşılırlık:** Enum'lar değerlere anlamlı isimler verir, veriyi daha anlaşılır hale getirir. +- **Doğrulama:** Enum'lar katı değer tanımlamaları uygulayarak geçersiz veri girişlerini önler. +- **Bakım Kolaylığı:** Yeni kategoriler eklemek veya mevcut olanları değiştirmek gerektiğinde, enum'lar bunu odaklı bir şekilde yapmanıza olanak tanır. -### Without Enums +### Enum'lar Olmadan -If you choose to define the type as a string instead of using an Enum, your code might look like this: +Türü Enum kullanmak yerine bir dize olarak tanımlamayı seçerseniz, kodunuz şöyle görünebilir: ```graphql type Token @entity { id: ID! tokenId: BigInt! - owner: Bytes! # Owner of the token - tokenStatus: String! # String field to track token status + owner: Bytes! # Token Sahibi + tokenStatus: String! # Token Durumunu Takip Eden Dize Alanı timestamp: BigInt! } ``` -In this schema, `TokenStatus` is a simple string with no specific, allowed values. +Bu şemada, `TokenStatus` belirli, alabileceği değerler sınırlandırılmış olmayan basit bir dizedir. #### Bu neden bir sorun? -- There's no restriction of `TokenStatus` values, so any string can be accidentally assigned. This makes it hard to ensure that only valid statuses like `OriginalOwner`, `SecondOwner`, or `ThirdOwner` are set. -- It's easy to make typos such as `Orgnalowner` instead of `OriginalOwner`, making the data and potential queries unreliable. +- `TokenStatus` değerleri için bir kısıtlama yoktur. Bu yüzden yanlışlıkla herhangi bir dize atanabilir. Bu, yalnızca `OriginalOwner`, `SecondOwner` veya `ThirdOwner` gibi geçerli durumların ayarlandığını sağlamayı zorlaştırır. +- `OriginalOwner` yerine `Orgnalowner` gibi yazım hataları yaparak verilerin ve potansiyel sorguların güvenilmez hale gelmesine sebep olmak kolaydır. -### With Enums +### Enum Kullanımıyla -Instead of assigning free-form strings, you can define an enum for `TokenStatus` with specific values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`. Using an enum ensures only allowed values are used. +Serbest formda dizeler atamak yerine, `TokenStatus` için `OriginalOwner`, `SecondOwner` veya `ThirdOwner` gibi belirli değerlerle bir enum tanımlanabilir. Bir enum kullanmak, yalnızca izin verilen değerlerin kullanılmasını sağlar. -Enums provide type safety, minimize typo risks, and ensure consistent and reliable results. +Enumlar; tür güvenliği sağlar, yazım hatası riskini en aza indirir ve tutarlı ve güvenilir sonuçlar sağlar. 
-## Defining Enums for NFT Marketplaces
+## NFT Pazar Yerleri için Enum Tanımlama

-> Note: The following guide uses the CryptoCoven NFT smart contract.
+> Not: Aşağıdaki kılavuz CryptoCoven NFT akıllı sözleşmesini kullanmaktadır.

-To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema:
+NFT'lerin ticaretinin yapıldığı çeşitli pazar yerleri için enum tanımlamak için subgraph şemanızda aşağıdakini kullanın:

 ```gql
-# Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint)
+# CryptoCoven sözleşmesinin etkileşimde bulunduğu pazar yerleri için Enum (muhtemel bir Takas/Basım)
 enum Marketplace {
-  OpenSeaV1 # Represents when a CryptoCoven NFT is traded on the marketplace
-  OpenSeaV2 # Represents when a CryptoCoven NFT is traded on the OpenSeaV2 marketplace
-  SeaPort # Represents when a CryptoCoven NFT is traded on the SeaPort marketplace
-  LooksRare # Represents when a CryptoCoven NFT is traded on the LookRare marketplace
-  # ...and other marketplaces
+  OpenSeaV1 # CryptoCoven NFT'sinin bu pazar yerinde takas yapılmasını temsil eder
+  OpenSeaV2 # CryptoCoven NFT'sinin OpenSeaV2 pazar yerinde takas yapılmasını temsil eder
+  SeaPort # CryptoCoven NFT'sinin SeaPort pazar yerinde takas yapılmasını temsil eder
+  LooksRare # CryptoCoven NFT'sinin LooksRare pazar yerinde takas yapılmasını temsil eder
+  # ...ve diğer pazar yerleri
 }
 ```

-## Using Enums for NFT Marketplaces
+## NFT Pazar Yerleri için Enum Kullanımı

-Once defined, enums can be used throughout your subgraph to categorize transactions or events.
+Tanımlandıktan sonra, enum'lar işlemleri veya olayları kategorize etmek için subgraph'inizde kullanılabilir.

-For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum.
+Örneğin, NFT satışlarını kaydederken takasta yer alan pazar yerini enum kullanarak belirleyebilirsiniz.

-### Implementing a Function for NFT Marketplaces
+### NFT Pazar Yerleri için Bir Fonksiyon İmplementasyonu

-Here's how you can implement a function to retrieve the marketplace name from the enum as a string:
+Enum'dan pazar yeri adını bir dize olarak almak için bir fonksiyonu şöyle uygulayabilirsiniz:

 ```ts
 export function getMarketplaceName(marketplace: Marketplace): string {
-  // Using if-else statements to map the enum value to a string
+  // Enum değerini bir dizeye eşlemek için if-else ifadelerini kullanma
   if (marketplace === Marketplace.OpenSeaV1) {
-    return 'OpenSeaV1' // If the marketplace is OpenSea, return its string representation
+    return 'OpenSeaV1' // Eğer pazar yeri OpenSea ise, onun dize temsilini döndür
   } else if (marketplace === Marketplace.OpenSeaV2) {
     return 'OpenSeaV2'
   } else if (marketplace === Marketplace.SeaPort) {
-    return 'SeaPort' // If the marketplace is SeaPort, return its string representation
+    return 'SeaPort' // Eğer pazar yeri SeaPort ise, onun dize temsilini döndür
   } else if (marketplace === Marketplace.LooksRare) {
-    return 'LooksRare' // If the marketplace is LooksRare, return its string representation
-    // ... and other market places
+    return 'LooksRare' // Eğer pazar yeri LooksRare ise, onun dize temsilini döndür
+    // ... ve diğer pazar yerleri
   }
 }
 ```

-## Best Practices for Using Enums
+## Enum Kullanımı için En İyi Uygulamalar

-- **Consistent Naming:** Use clear, descriptive names for enum values to improve readability.
-- **Centralized Management:** Keep enums in a single file for consistency.
This makes enums easier to update and ensures they are the single source of truth. -- **Documentation:** Add comments to enum to clarify their purpose and usage. +- **Tutarlı İsimlendirme:** Okunabilirliği artırmak için enum değerleri için net, açıklayıcı isimler kullanın. +- **Merkezi Yönetim:** Tutarlılık için enum'ları tek bir dosyada tutun. Böylece enum'ların güncellenmesi kolaylaşmış olur ve onların tek bir doğru bilgi kaynağı olmasını sağlar. +- **Dokümantasyon:** Amaçlarını ve kullanımını açıklamak için enum'a yorumlar ekleyin. -## Using Enums in Queries +## Sorgularda Enum Kullanımı -Enums in queries help you improve data quality and make your results easier to interpret. They function as filters and response elements, ensuring consistency and reducing errors in marketplace values. +Sorgulardaki enum'lar verilerin kalitesini artırmanıza ve sonuçları daha kolay yorumlamanıza yardımcı olur. Enumlar filtreleme ve yanıt ögeleri olarak işlev görürler, tutarlılığı sağlarlar ve pazar yerlerindeki hataları azaltırlar. -**Specifics** +**Ayrıntılar** -- **Filtering with Enums:** Enums provide clear filters, allowing you to confidently include or exclude specific marketplaces. -- **Enums in Responses:** Enums guarantee that only recognized marketplace names are returned, making the results standardized and accurate. +- **Enum ile Filtreleme:** Enum'lar net filtreler sağlar, belirli pazarları güvenle dahil etmenizi veya hariç tutmanızı mümkün kılar. +- **Yanıtlarda Enum'lar:** Enum'lar yalnızca tanınan pazar adlarının döndürülmesini garanti eder, bu da sonuçları standart ve isabetli hale getirir. -### Sample Queries +### Örnek Sorgular -#### Query 1: Account With The Highest NFT Marketplace Interactions +#### Sorgu 1: En Yüksek NFT Pazar Yeri Etkileşimine Sahip Hesap -This query does the following: +Bu sorgu şunları yapar: -- It finds the account with the highest unique NFT marketplace interactions, which is great for analyzing cross-marketplace activity. -- The marketplaces field uses the marketplace enum, ensuring consistent and validated marketplace values in the response. +- Farklı pazar yerlerinde en yüksek benzersiz NFT etkileşimlerine sahip hesabı bulur. Bu da çapraz pazar yeri aktivitelerini analiz etmek için mükemmeldir. +- Pazar yerleri alanı, yanıt içerisindeki pazar yeri değerlerini tutarlı ve doğrulanmış hale getiren pazar yeri enum'ını kullanır. ```gql { @@ -137,15 +137,15 @@ This query does the following: totalSpent uniqueMarketplacesCount marketplaces { - marketplace # This field returns the enum value representing the marketplace + marketplace # Bu alan, pazar yerini temsil eden enum değerini döndürür. } } } ``` -#### Returns +#### Dönüşler -This response provides account details and a list of unique marketplace interactions with enum values for standardized clarity: +Bu yanıt; hesap detaylarını, ve netlik sağlamak amacıyla enum değerlerine sahip benzersiz pazar yeri etkileşimlerinin listesini sağlar: ```gql { @@ -186,12 +186,12 @@ This response provides account details and a list of unique marketplace interact } ``` -#### Query 2: Most Active Marketplace for CryptoCoven transactions +#### Sorgu 2: CryptoCoven İşlemleri için En Aktif Pazar Yeri -This query does the following: +Bu sorgu şunları yapar: -- It identifies the marketplace with the highest volume of CryptoCoven transactions. -- It uses the marketplace enum to ensure that only valid marketplace types appear in the response, adding reliability and consistency to your data. 
+- CryptoCoven işlemlerinin en yüksek hacimli olduğu pazar yerini belirler. +- Yalnızca geçerli pazar yeri türlerinin yanıt olarak görünmesini sağlamak için pazar yeri enum'ını kullanarak verilerinize güvenilirlik ve tutarlılık katar. ```gql { @@ -202,9 +202,9 @@ This query does the following: } ``` -#### Result 2 +#### Sonuç 2 -The expected response includes the marketplace and the corresponding transaction count, using the enum to indicate the marketplace type: +Beklenen yanıt, pazar yerini ve ilgili işlem sayısını içerir; pazar yeri türünü belirtmek için enum kullanır: ```gql { @@ -219,12 +219,12 @@ The expected response includes the marketplace and the corresponding transaction } ``` -#### Query 3: Marketplace Interactions with High Transaction Counts +#### Sorgu 3: Yüksek İşlem Sayısına Sahip Pazar Etkileşimleri -This query does the following: +Bu sorgu şunları yapar: -- It retrieves the top four marketplaces with over 100 transactions, excluding "Unknown" marketplaces. -- It uses enums as filters to ensure that only valid marketplace types are included, increasing accuracy. +- "Unknown" pazarlarını hariç tutarak, 100'den fazla işlemi olan ilk dört pazarı getirir. +- Yalnızca geçerli pazar türlerinin dahil edilmesini sağlamak için filtre olarak enum'lar kullanır. Böylece doğruluk oranı arttırılmış olur. ```gql { @@ -240,9 +240,9 @@ This query does the following: } ``` -#### Result 3 +#### Sonuç 3 -Expected output includes the marketplaces that meet the criteria, each represented by an enum value: +Beklenen çıktı, her biri bir enum değeri ile temsil edilen, kriterleri karşılayan pazarları içerir: ```gql { @@ -271,4 +271,4 @@ Expected output includes the marketplaces that meet the criteria, each represent ## Ek Kaynaklar -For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums). +Ek bilgi için bu rehberin [deposuna](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums) göz atın. From 5c05b9ea62fcc9f262363f628f50896c8c9816c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:32 -0500 Subject: [PATCH 0631/1534] New translations enums.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/cookbook/enums.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/enums.mdx b/website/src/pages/uk/subgraphs/cookbook/enums.mdx index a10970c1539f..4fa07dc05765 100644 --- a/website/src/pages/uk/subgraphs/cookbook/enums.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/enums.mdx @@ -269,6 +269,6 @@ Expected output includes the marketplaces that meet the criteria, each represent } ``` -## Additional Resources +## Додаткові матеріали For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums). 
From 42c0e489038342432fdc3b5b2c8a4c4e8f723be3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:34 -0500
Subject: [PATCH 0632/1534] New translations enums.mdx (Marathi)

---
 website/src/pages/mr/subgraphs/cookbook/enums.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/src/pages/mr/subgraphs/cookbook/enums.mdx b/website/src/pages/mr/subgraphs/cookbook/enums.mdx
index a10970c1539f..081add904f9a 100644
--- a/website/src/pages/mr/subgraphs/cookbook/enums.mdx
+++ b/website/src/pages/mr/subgraphs/cookbook/enums.mdx
@@ -269,6 +269,6 @@ Expected output includes the marketplaces that meet the criteria, each represent
 }
 ```

-## Additional Resources
+## अतिरिक्त संसाधने

 For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums).

From 65eff738ce170187b72f90c570677f25b85e5ed2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:55:35 -0500
Subject: [PATCH 0633/1534] New translations enums.mdx (Hindi)

---
 .../src/pages/hi/subgraphs/cookbook/enums.mdx | 142 +++++++++--------
 1 file changed, 71 insertions(+), 71 deletions(-)

diff --git a/website/src/pages/hi/subgraphs/cookbook/enums.mdx b/website/src/pages/hi/subgraphs/cookbook/enums.mdx
index 6ce7f9e84551..28fc4517ca23 100644
--- a/website/src/pages/hi/subgraphs/cookbook/enums.mdx
+++ b/website/src/pages/hi/subgraphs/cookbook/enums.mdx
@@ -1,20 +1,20 @@
 ---
-title: Categorize NFT Marketplaces Using Enums
+title: NFT मार्केटप्लेस को Enums का उपयोग करके श्रेणीबद्ध करें
 ---

-Use Enums to make your code cleaner and less error-prone. Here's a full example of using Enums on NFT marketplaces.
+Enums का उपयोग करके अपने कोड को साफ और कम त्रुटिपूर्ण बनाएं। यहां NFT मार्केटप्लेस पर Enums के उपयोग का एक पूरा उदाहरण है।

-## What are Enums?
+## Enums क्या हैं?

-Enums, or enumeration types, are a specific data type that allows you to define a set of specific, allowed values.
+Enums, या enumeration types, एक विशिष्ट डेटा प्रकार होते हैं जो आपको विशिष्ट, अनुमत मानों का एक सेट परिभाषित करने की अनुमति देते हैं।

-### Example of Enums in Your Schema
+### अपने Schema में Enums का उदाहरण

-If you're building a subgraph to track the ownership history of tokens on a marketplace, each token might go through different ownerships, such as `OriginalOwner`, `SecondOwner`, and `ThirdOwner`. By using enums, you can define these specific ownerships, ensuring only predefined values are assigned.
+यदि आप एक subgraph बना रहे हैं जो एक मार्केटप्लेस पर टोकन के स्वामित्व इतिहास को ट्रैक करता है, तो प्रत्येक टोकन विभिन्न स्वामित्वों से गुजर सकता है, जैसे कि OriginalOwner, SecondOwner, और ThirdOwner। enums का उपयोग करके, आप इन विशिष्ट स्वामित्वों को परिभाषित कर सकते हैं, यह सुनिश्चित करते हुए कि केवल पूर्वनिर्धारित मान ही सौंपे जाएं।

-You can define enums in your schema, and once defined, you can use the string representation of the enum values to set an enum field on an entity.
+आप अपनी स्कीमा में एनम्स (enums) को परिभाषित कर सकते हैं, और एक बार परिभाषित हो जाने के बाद, आप एनम के मानों की स्ट्रिंग प्रस्तुति का उपयोग करके एक एनम फ़ील्ड को एक entity पर सेट कर सकते हैं।

-Here's what an enum definition might look like in your schema, based on the example above:
+यहां आपके स्कीमा में एक enum परिभाषा इस प्रकार हो सकती है, उपरोक्त उदाहरण के आधार पर:

 ```graphql
 enum TokenStatus {
@@ -24,19 +24,19 @@ enum TokenStatus {
 }
 ```

-This means that when you use the `TokenStatus` type in your schema, you expect it to be exactly one of predefined values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`, ensuring consistency and validity.
+इसका मतलब है कि जब आप अपने स्कीमा में TokenStatus प्रकार का उपयोग करते हैं, तो आप इसकी अपेक्षा करते हैं कि यह पहले से परिभाषित मानों में से एक हो: OriginalOwner, SecondOwner, या ThirdOwner, जिससे निरंतरता और वैधता सुनिश्चित होती है।

-To learn more about enums, check out [Creating a Subgraph](/developing/creating-a-subgraph/#enums) and [GraphQL documentation](https://graphql.org/learn/schema/#enumeration-types).
+इस बारे में अधिक जानने के लिए [Creating a Subgraph](/developing/creating-a-subgraph/#enums) और [GraphQL documentation](https://graphql.org/learn/schema/#enumeration-types) देखें।

-## Benefits of Using Enums
+## Enums का उपयोग करने के लाभ

-- **Clarity:** Enums provide meaningful names for values, making data easier to understand.
-- **Validation:** Enums enforce strict value definitions, preventing invalid data entries.
-- **Maintainability:** When you need to change or add new categories, enums allow you to do this in a focused manner.
+- स्पष्टता: Enums मानों के लिए सार्थक नाम प्रदान करते हैं, जिससे डेटा को समझना आसान होता है।
+- सत्यापन: Enums कड़ी मान परिभाषाएँ लागू करते हैं, जो अवैध डेटा प्रविष्टियों को रोकते हैं।
+- रखरखाव: जब आपको नई श्रेणियाँ जोड़ने या बदलने की आवश्यकता हो, तो enums आपको ऐसा एक केंद्रित तरीके से करने देते हैं।

-### Without Enums
+### बिना Enums

-If you choose to define the type as a string instead of using an Enum, your code might look like this:
+यदि आप Enum का उपयोग करने के बजाय प्रकार को एक स्ट्रिंग के रूप में परिभाषित करते हैं, तो आपका कोड इस प्रकार दिख सकता है:

 ```graphql
 type Token @entity {
   id: ID!
   tokenId: BigInt!
   owner: Bytes! # Owner of the token
   tokenStatus: String! # String field to track token status
   timestamp: BigInt!
 }
 ```

-In this schema, `TokenStatus` is a simple string with no specific, allowed values.
+इस स्कीमा में, TokenStatus एक साधारण स्ट्रिंग है जिसमें कोई विशिष्ट, अनुमत मान नहीं होते हैं।

 #### यह एक समस्या क्यों है?

-- There's no restriction of `TokenStatus` values, so any string can be accidentally assigned. This makes it hard to ensure that only valid statuses like `OriginalOwner`, `SecondOwner`, or `ThirdOwner` are set.
-- It's easy to make typos such as `Orgnalowner` instead of `OriginalOwner`, making the data and potential queries unreliable.
+- TokenStatus मानों की कोई सीमा नहीं है, इसलिए कोई भी स्ट्रिंग गलती से असाइन की जा सकती है। इससे यह सुनिश्चित करना कठिन हो जाता है कि केवल वैध स्टेटस जैसे OriginalOwner, SecondOwner, या ThirdOwner सेट किए जाएं।
+- यह टाइपो करना आसान है जैसे Orgnalowner को OriginalOwner के बजाय, जिससे डेटा और संभावित queries अविश्वसनीय हो सकती हैं।

-### With Enums
+### Enums के साथ

-Instead of assigning free-form strings, you can define an enum for `TokenStatus` with specific values: `OriginalOwner`, `SecondOwner`, or `ThirdOwner`. Using an enum ensures only allowed values are used.
+इसके बजाय कि आप फ्री-फॉर्म स्ट्रिंग्स असाइन करें, आप TokenStatus के लिए एक enum परिभाषित कर सकते हैं जिसमें विशिष्ट मान हों: OriginalOwner, SecondOwner, या ThirdOwner। enum का उपयोग करने से यह सुनिश्चित होता है कि केवल अनुमत मान ही उपयोग किए जाएं। -Enums provide type safety, minimize typo risks, and ensure consistent and reliable results. +Enums प्रकार सुरक्षा प्रदान करते हैं, टाइपो के जोखिम को कम करते हैं, और सुनिश्चित करते हैं कि परिणाम लगातार और विश्वसनीय हों। -## Defining Enums for NFT Marketplaces +## NFT मार्केटप्लेस के लिए एन्उम्स को परिभाषित करना -> Note: The following guide uses the CryptoCoven NFT smart contract. +> नोट: निम्नलिखित guide CryptoCoven NFT स्मार्ट कॉन्ट्रैक्ट का उपयोग करती है। -To define enums for the various marketplaces where NFTs are traded, use the following in your subgraph schema: +NFTs जहां ट्रेड होते हैं, उन विभिन्न मार्केटप्लेस के लिए enums को परिभाषित करने के लिए, अपने Subgraph स्कीमा में निम्नलिखित का उपयोग करें: ```gql -# Enum for Marketplaces that the CryptoCoven contract interacted with(likely a Trade/Mint) +#मार्केटप्लेस के लिए Enum जो CryptoCoven कॉन्ट्रैक्ट के साथ इंटरएक्टेड हैं (संभवत: ट्रेड/मिंट) enum Marketplace { - OpenSeaV1 # Represents when a CryptoCoven NFT is traded on the marketplace - OpenSeaV2 # Represents when a CryptoCoven NFT is traded on the OpenSeaV2 marketplace - SeaPort # Represents when a CryptoCoven NFT is traded on the SeaPort marketplace - LooksRare # Represents when a CryptoCoven NFT is traded on the LookRare marketplace - # ...and other marketplaces + OpenSeaV1 # जब CryptoCoven NFT को इस बाजार में व्यापार किया जाता है + OpenSeaV2 # जब CryptoCoven NFT को OpenSeaV2 बाजार में व्यापार किया जाता है + SeaPort # जब CryptoCoven NFT को SeaPort बाजार में व्यापार किया जाता है + LooksRare # जब CryptoCoven NFT को LooksRare बाजार में व्यापार किया जाता है + # ...और अन्य बाजार } ``` -## Using Enums for NFT Marketplaces +## NFT Marketplaces के लिए Enums का उपयोग -Once defined, enums can be used throughout your subgraph to categorize transactions or events. +एक बार परिभाषित होने पर, enums का उपयोग आपके subgraph में transactions या events को श्रेणीबद्ध करने के लिए किया जा सकता है। -For example, when logging NFT sales, you can specify the marketplace involved in the trade using the enum. 
+उदाहरण के लिए, जब logging NFT बिक्री लॉग करते हैं, तो आप ट्रेड में शामिल मार्केटप्लेस को enum का उपयोग करके निर्दिष्ट कर सकते हैं। -### Implementing a Function for NFT Marketplaces +### NFT मार्केटप्लेस के लिए एक फंक्शन लागू करना -Here's how you can implement a function to retrieve the marketplace name from the enum as a string: +यहाँ बताया गया है कि आप एक फ़ंक्शन को कैसे लागू कर सकते हैं जो enum से मार्केटप्लेस का नाम एक स्ट्रिंग के रूप में प्राप्त करता है: ```ts export function getMarketplaceName(marketplace: Marketplace): string { - // Using if-else statements to map the enum value to a string + // यदि-और-else कथनों का उपयोग करके enum मान को एक स्ट्रिंग में मैप करें if (marketplace === Marketplace.OpenSeaV1) { - return 'OpenSeaV1' // If the marketplace is OpenSea, return its string representation + return 'OpenSeaV1' // यदि बाज़ार OpenSea है, तो इसकी स्ट्रिंग प्रतिनिधित्व लौटाएँ } else if (marketplace === Marketplace.OpenSeaV2) { return 'OpenSeaV2' } else if (marketplace === Marketplace.SeaPort) { - return 'SeaPort' // If the marketplace is SeaPort, return its string representation + return 'SeaPort' // यदि बाज़ार SeaPort है, तो इसकी स्ट्रिंग प्रतिनिधित्व लौटाएँ } else if (marketplace === Marketplace.LooksRare) { - return 'LooksRare' // If the marketplace is LooksRare, return its string representation - // ... and other market places + return 'LooksRare' // यदि बाज़ार LooksRare है, तो इसकी स्ट्रिंग प्रतिनिधित्व लौटाएँ + // ... और अन्य बाज़ार } } ``` -## Best Practices for Using Enums +## Enums का उपयोग करने के लिए सर्वोत्तम प्रथाएँ -- **Consistent Naming:** Use clear, descriptive names for enum values to improve readability. -- **Centralized Management:** Keep enums in a single file for consistency. This makes enums easier to update and ensures they are the single source of truth. -- **Documentation:** Add comments to enum to clarify their purpose and usage. +- सुसंगत नामकरण: पठनीयता को बेहतर बनाने के लिए enum मानों के लिए स्पष्ट, वर्णनात्मक नामों का उपयोग करें। +- केंद्रीकृत प्रबंधन: एकल फ़ाइल में enums रखें ताकि सुसंगतता बनी रहे। इससे enums को अपडेट करना आसान हो जाता है और यह सत्य का एकमात्र source बनता है। +- दस्तावेज़ीकरण: एनम में उनकी उद्देश्य और उपयोग को स्पष्ट करने के लिए टिप्पणियाँ जोड़ें। -## Using Enums in Queries +## queries में Enums का उपयोग करना -Enums in queries help you improve data quality and make your results easier to interpret. They function as filters and response elements, ensuring consistency and reducing errors in marketplace values. +क्वेरी में Enums आपके डेटा की गुणवत्ता में सुधार करने और आपके परिणामों को समझने में आसान बनाने में मदद करते हैं। ये फ़िल्टर और प्रतिक्रिया तत्व के रूप में कार्य करते हैं, बाज़ार के मूल्यों में स्थिरता सुनिश्चित करते हैं और त्रुटियों को कम करते हैं। -**Specifics** +**विशिष्टताएँ** -- **Filtering with Enums:** Enums provide clear filters, allowing you to confidently include or exclude specific marketplaces. -- **Enums in Responses:** Enums guarantee that only recognized marketplace names are returned, making the results standardized and accurate. 
+- **Enums के साथ फ़िल्टरिंग:** Enums स्पष्ट फ़िल्टर प्रदान करते हैं, जिससे आप निश्चित रूप से विशिष्ट मार्केटप्लेस को शामिल या बाहर कर सकते हैं। +- **प्रतिसादों में Enums:** एन्‍यम्‍स यह सुनिश्चित करते हैं कि केवल मान्यता प्राप्त मार्केटप्लेस नाम ही वापस आएं, जिससे परिणाम मानकीकृत और सटीक हों। -### Sample Queries +### नमूना queries -#### Query 1: Account With The Highest NFT Marketplace Interactions +#### Query 1: सबसे अधिक NFT मार्केटप्लेस इंटरएक्शन वाला खाता -This query does the following: +यह क्वेरी निम्नलिखित कार्य करती है: -- It finds the account with the highest unique NFT marketplace interactions, which is great for analyzing cross-marketplace activity. -- The marketplaces field uses the marketplace enum, ensuring consistent and validated marketplace values in the response. +- यह खाते को खोजता है जिसमें सबसे अधिक अनूठे NFT मार्केटप्लेस इंटरैक्शन होते हैं, जो क्रॉस-मार्केटप्लेस गतिविधि का विश्लेषण करने के लिए बेहतरीन है। +- मार्केटप्लेस फील्ड marketplace एनम का उपयोग करता है, जो प्रतिक्रिया में सुसंगत और मान्य मार्केटप्लेस मान सुनिश्चित करता है। ```gql { @@ -143,9 +143,9 @@ This query does the following: } ``` -#### Returns +#### रिटर्न्स -This response provides account details and a list of unique marketplace interactions with enum values for standardized clarity: +यह प्रतिक्रिया खाता विवरण और मानकीकृत स्पष्टता के लिए एनम मानों के साथ अद्वितीय मार्केटप्लेस इंटरैक्शन्स की सूची प्रदान करती है: ```gql { @@ -186,12 +186,12 @@ This response provides account details and a list of unique marketplace interact } ``` -#### Query 2: Most Active Marketplace for CryptoCoven transactions +#### Query 2: CryptoCoven transactions के लिए सबसे सक्रिय बाज़ार -This query does the following: +यह क्वेरी निम्नलिखित कार्य करती है: -- It identifies the marketplace with the highest volume of CryptoCoven transactions. -- It uses the marketplace enum to ensure that only valid marketplace types appear in the response, adding reliability and consistency to your data. +- यह उस मार्केटप्लेस की पहचान करता है जहां CryptoCoven लेनदेन का सबसे अधिक वॉल्यूम होता है। +- यह मार्केटप्लेस enum का उपयोग करता है ताकि प्रतिक्रिया में केवल मान्य मार्केटप्लेस प्रकार ही दिखाई दें, जिससे आपके डेटा में विश्वसनीयता और स्थिरता बनी रहती है। ```gql { @@ -202,9 +202,9 @@ This query does the following: } ``` -#### Result 2 +#### परिणाम 2 -The expected response includes the marketplace and the corresponding transaction count, using the enum to indicate the marketplace type: +अपेक्षित प्रतिक्रिया में मार्केटप्लेस और संबंधित transaction संख्या शामिल है, जो मार्केटप्लेस प्रकार को संकेत करने के लिए enum का उपयोग करती है: ```gql { @@ -219,12 +219,12 @@ The expected response includes the marketplace and the corresponding transaction } ``` -#### Query 3: Marketplace Interactions with High Transaction Counts +#### प्रश्न 3: उच्च लेन-देन गणना के साथ बाज़ार परस्पर क्रियाएँ -This query does the following: +यह क्वेरी निम्नलिखित कार्य करती है: -- It retrieves the top four marketplaces with over 100 transactions, excluding "Unknown" marketplaces. -- It uses enums as filters to ensure that only valid marketplace types are included, increasing accuracy. 
+- यह 100 से अधिक transactions वाले शीर्ष चार बाजारों को पुनः प्राप्त करता है, "Unknown" बाजारों को छोड़कर। +- यह केवल वैध मार्केटप्लेस प्रकारों को शामिल करने के लिए फ़िल्टर के रूप में एंनम का उपयोग करता है, जिससे सटीकता बढ़ती है। ```gql { @@ -240,9 +240,9 @@ This query does the following: } ``` -#### Result 3 +#### परिणाम 3 -Expected output includes the marketplaces that meet the criteria, each represented by an enum value: +अपेक्षित आउटपुट में उन मार्केटप्लेस का समावेश है जो मानदंडों को पूरा करते हैं, प्रत्येक को एक enum मान द्वारा प्रदर्शित किया जाता है: ```gql { @@ -269,6 +269,6 @@ Expected output includes the marketplaces that meet the criteria, each represent } ``` -## अतिरिक्त संसाधन +## Additional Resources -For additional information, check out this guide's [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums). +अधिक जानकारी के लिए, इस guide's के [repo](https://github.com/chidubemokeke/Subgraph-Tutorial-Enums) को देखें। From 0255383ca629d636fc94a072d8e1132eb48158a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:37 -0500 Subject: [PATCH 0634/1534] New translations grafting-hotfix.mdx (Romanian) --- website/src/pages/ro/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ro/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ro/subgraphs/cookbook/grafting-hotfix.mdx index a0bd3f4ab1c2..5b191eb8e3d4 100644 --- a/website/src/pages/ro/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From b068b8df042fdcdd7a915658daf81e7ef24d79a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:38 -0500 Subject: [PATCH 0635/1534] New translations grafting-hotfix.mdx (French) --- website/src/pages/fr/subgraphs/cookbook/grafting-hotfix.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/fr/subgraphs/cookbook/grafting-hotfix.mdx index 57cd57c1250f..53c5c371278e 100644 --- a/website/src/pages/fr/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR @@ -164,7 +165,7 @@ Grafting is an effective strategy for deploying hotfixes in subgraph development However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. -## Ressources additionnelles +## Ressources supplémentaires - **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting - **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. 
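The grafting-hotfix commits from 0634 onward are all single-line hunks: each one inserts a `sidebarTitle` key into the front matter of one locale's `grafting-hotfix.mdx`. As a sketch of the net effect, assuming these files carry no front-matter keys beyond the ones visible in the hunks, the resulting front matter in a locale whose title is still untranslated reads:

```yaml
---
title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment
sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing"
---
```

Locales that have already localized the page, such as Portuguese, Russian, and Turkish later in the series, keep their translated `title` and receive the same English `sidebarTitle` string.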
From c8144e5cb28da551e033ad6016244d8bbb3c9f95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:39 -0500 Subject: [PATCH 0636/1534] New translations grafting-hotfix.mdx (Spanish) --- website/src/pages/es/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/es/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/es/subgraphs/cookbook/grafting-hotfix.mdx index ddf69bb91735..dcff0e105e37 100644 --- a/website/src/pages/es/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/es/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From b9e352b508ee772afbc4050fdf16e53c26295876 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:40 -0500 Subject: [PATCH 0637/1534] New translations grafting-hotfix.mdx (Arabic) --- website/src/pages/ar/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ar/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ar/subgraphs/cookbook/grafting-hotfix.mdx index 2bbe296c724a..ec5c1e2f8248 100644 --- a/website/src/pages/ar/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 9f85cb500becac7de65eff4b9dc43c5087276f74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:41 -0500 Subject: [PATCH 0638/1534] New translations grafting-hotfix.mdx (Czech) --- website/src/pages/cs/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/cs/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/cs/subgraphs/cookbook/grafting-hotfix.mdx index 934f1dd419c3..0a29321ab859 100644 --- a/website/src/pages/cs/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 4ba8a84855dff5eeb1891d497fcb02c8a3d92837 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:42 -0500 Subject: [PATCH 0639/1534] New translations grafting-hotfix.mdx (German) --- website/src/pages/de/subgraphs/cookbook/grafting-hotfix.mdx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/de/subgraphs/cookbook/grafting-hotfix.mdx index a0bd3f4ab1c2..088434ce43a1 100644 --- a/website/src/pages/de/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/de/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,12 +1,13 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. 
-### Overview +### Überblick This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. @@ -164,7 +165,7 @@ Grafting is an effective strategy for deploying hotfixes in subgraph development However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. -## Additional Resources +## Zusätzliche Ressourcen - **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting - **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. From b0f91471d5f8314b19710ce991ed6e68331bf2c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:43 -0500 Subject: [PATCH 0640/1534] New translations grafting-hotfix.mdx (Italian) --- website/src/pages/it/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/it/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/it/subgraphs/cookbook/grafting-hotfix.mdx index 53c1bae83194..d6b080d7f19f 100644 --- a/website/src/pages/it/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/it/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 4dbee78c860220ef7cb6f9e644084d5dfc22f4f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:44 -0500 Subject: [PATCH 0641/1534] New translations grafting-hotfix.mdx (Japanese) --- website/src/pages/ja/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ja/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ja/subgraphs/cookbook/grafting-hotfix.mdx index ce3e717c103a..7cf02e466288 100644 --- a/website/src/pages/ja/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 68b6d76d541bdf02f106eb272b213a10cded01dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:45 -0500 Subject: [PATCH 0642/1534] New translations grafting-hotfix.mdx (Korean) --- website/src/pages/ko/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ko/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ko/subgraphs/cookbook/grafting-hotfix.mdx index a0bd3f4ab1c2..5b191eb8e3d4 100644 --- a/website/src/pages/ko/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 77b6e3086544e6b2da25a3ad6d6c9611b286cab5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:46 -0500 Subject: [PATCH 0643/1534] New translations grafting-hotfix.mdx (Dutch) --- 
website/src/pages/nl/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/nl/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/nl/subgraphs/cookbook/grafting-hotfix.mdx index a0bd3f4ab1c2..5b191eb8e3d4 100644 --- a/website/src/pages/nl/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From facd5a926af04d012a9268423dd4e10342a9d714 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:47 -0500 Subject: [PATCH 0644/1534] New translations grafting-hotfix.mdx (Polish) --- website/src/pages/pl/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/pl/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/pl/subgraphs/cookbook/grafting-hotfix.mdx index a0bd3f4ab1c2..5b191eb8e3d4 100644 --- a/website/src/pages/pl/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From f86d30c65f8944844754a6118dcd728cd4d8cbeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:48 -0500 Subject: [PATCH 0645/1534] New translations grafting-hotfix.mdx (Portuguese) --- .../pt/subgraphs/cookbook/grafting-hotfix.mdx | 153 +++++++++--------- 1 file changed, 77 insertions(+), 76 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/pt/subgraphs/cookbook/grafting-hotfix.mdx index b09d54c13373..a47b68759200 100644 --- a/website/src/pages/pt/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,64 +1,65 @@ --- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +title: "Melhores Práticas de Subgraph #6 - Use Enxertos para Implantar Hotfixes Mais Rápido" +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. +O enxerto é uma função poderosa na programação de subgraphs, que permite a construção e implantação de novos subgraphs enquanto recicla os dados indexados dos já existentes. ### Visão geral -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. +Esta função permite a implantação rápida de hotfixes para problemas críticos, eliminando a necessidade de indexar o subgraph inteiro do zero novamente. Ao preservar dados históricos, enxertar diminui o tempo de espera e garante a continuidade em serviços de dados. -## Benefits of Grafting for Hotfixes +## Benefícios de Enxertos para Hotfixes -1. **Rapid Deployment** +1. **Lançamento Rápido** - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. 
-   - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted.
+   - **Espera Minimizada**: Quando um subgraph encontra um erro crítico e para de indexar, um enxerto permite que seja lançada uma solução imediata, sem esperar uma nova indexação.
+   - **Recuperação Imediata**: O novo subgraph continua do último bloco indexado, garantindo o funcionamento ininterrupto dos serviços de dados.

-2. **Data Preservation**
+2. **Preservação de Dados**

-   - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records.
-   - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data.
+   - **Reaproveitamento de Dados Históricos**: O enxerto copia os dados existentes do subgraph de origem; assim, não há como perder dados históricos valiosos.
+   - **Consistência**: Mantém a continuidade de dados, que é crucial para aplicativos que dependem de dados históricos consistentes.

-3. **Efficiency**
-   - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets.
-   - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery.
+3. **Eficiência**
+   - **Economia de Tempo e Recursos**: Evita o fardo computacional de repetir a indexação de grandes conjuntos de dados.
+   - **Foco em Conserto**: Os programadores podem se concentrar na solução de problemas, em vez de controlar a recuperação de dados.

-## Best Practices When Using Grafting for Hotfixes
+## Melhores Práticas ao Usar Enxertos para Hotfixes

-1. **Initial Deployment Without Grafting**
+1. **Implantação Inicial sem Enxerto**

-   - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected.
-   - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes.
+   - **Começar do Zero**: Sempre lance o seu subgraph inicial sem enxertos para que fique estável e funcione como esperado.
+   - **Fazer Testes Minuciosos:** Valide o desempenho do subgraph para minimizar a necessidade de hotfixes futuros.

-2. **Implementing the Hotfix with Grafting**
+2. **Implementação do Hotfix com Enxerto**

-   - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event.
-   - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix.
-   - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph.
-   - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible.
+   - **Identificar o Problema**: Quando ocorrer um erro crítico, determine o número de bloco do último evento indexado com êxito.
+   - **Criar um Novo Subgraph**: Programe um novo subgraph que inclui o hotfix.
+   - **Configure o Enxerto**: Use o enxerto para copiar dados até o número de bloco identificado do subgraph defeituoso.
+   - **Lance Rápido**: Edite o subgraph enxertado para reabrir o serviço o mais rápido possível.

-3. **Post-Hotfix Actions**
+3. **Depois do Hotfix**

-   - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue.
-   - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance.
-     > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance.
-   - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph.
+   - **Monitore o Desempenho**: Tenha certeza que o subgraph enxertado está a indexar corretamente, e que o hotfix pode resolver o problema.
- > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + - **Monitore o Desempenho**: Tenha certeza que o subgraph enxertado está a indexar corretamente, e que o hotfix pode resolver o problema. + - **Reedite Sem Enxertos**: Agora que está estável, lance uma nova versão do subgraph sem enxertos para fins de manutenção a longo prazo. + > Nota: Não é recomendado depender de enxertos indefinidamente, pois isto pode complicar a manutenção e implantação de futuras atualizações. + - **Atualize as Referências**: Redirecione quaisquer serviços ou aplicativos para que usem o novo subgraph, sem enxertos. -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. +4. **Considerações Importantes** + - **Selecione Blocos Corretamente**: Escolha o número de bloco do enxerto com cuidado, para evitar perdas de dados. + - **Dica**: Use o número de bloco do último evento corretamente processado. + - **Use a ID de Implantação**: Referencie a ID de Implantação do subgraph de origem, não a ID do Subgraph. + - **Nota**: A ID de Implantação é a identificadora única para uma implantação específica de subgraph. + - **Declaração de Funções**: Não se esqueça de declarar enxertos na lista de funções, no manifest do seu subgraph. -## Example: Deploying a Hotfix with Grafting +## Exemplo: Como Implantar um Subgraph com Enxertos -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. +Vamos supor que tens um subgraph a rastrear um contrato inteligente, que parou de indexar devido a um erro crítico. Veja como usar um enxerto para implementar um hotfix. -1. **Failed Subgraph Manifest (subgraph.yaml)** +1. **Manifest Falho de Subgraph (subgraph.yaml)** ```yaml specVersion: 1.0.0 @@ -87,7 +88,7 @@ Suppose you have a subgraph tracking a smart contract that has stopped indexing file: ./src/old-lock.ts ``` -2. **New Grafted Subgraph Manifest (subgraph.yaml)** +2. **Novo Manifest Enxertado de Subgraph (subgraph.yaml)** ```yaml specVersion: 1.0.0 schema: @@ -120,67 +121,67 @@ Suppose you have a subgraph tracking a smart contract that has stopped indexing block: 6000000 # Last successfully indexed block ``` -**Explanation:** +**Explicação:** -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. +- **Atualização de Fonte de Dados**: O novo subgraph aponta para 0xNewContractAddress, que pode ser uma versão consertada do contrato inteligente. +- **Bloco Inicial**: Configure para um bloco após o último indexado com êxito, para evitar processar o erro novamente. 
+- **Configuração de Enxerto**: + - **base**: ID de Implantação do subgraph falho. + - **block**: Número de blocos onde o enxerto deve começar. -3. **Deployment Steps** +3. **Etapas de Implantação** - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. + - **Atualize o Código**: Implemente o hotfix nos seus scripts de mapeamento (por exemplo, handleWithdrawal). + - **Ajuste o Manifest**: Conforme detalhado acima, atualize o `subgraph.yaml` com configurações de enxerto. + - **Lance o Subgraph**: + - Autentique com a Graph CLI. + - Lance o novo subgraph com `graph deploy`. -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. +4. **Após a Implantação** + - **Verifique a Indexação**: Verifique se o subgraph está a indexar corretamente a partir do ponto de enxerto. + - **Monitore os Dados**: Verifique se há novos dados sendo capturados, e se o hotfix funciona. + - **Planeie Para uma Reedição**: Prepare a implantação de uma versão não enxertada, para mais estabilidade a longo prazo. -## Warnings and Cautions +## Tome Cuidado -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. +O enxerto é uma ferramenta poderosa para implantar hotfixes rapidamente, mas deve ser evitado em algumas situações específicas — para manter a integridade dos dados e garantir o melhor desempenho. -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. +- **Mudanças Incompatíveis de Schema**: Se o seu hotfix exigir a alteração do tipo de campos existentes ou a remoção de campos do seu esquema, não é adequado fazer um enxerto. O enxerto espera que o esquema do novo subgraph seja compatível com o schema do subgráfico base. Alterações incompatíveis podem levar a inconsistências e erros de dados, porque os dados existentes não se alinham com o novo schema. 
+- **Mudanças Significantes na Lógica de Mapeamento**: Quando o hotfix envolve modificações substanciais na sua lógica de mapeamento — como alterar o processamento de eventos ​de funções do handler — o enxerto pode não funcionar corretamente. A nova lógica pode não ser compatível com os dados processados ​​sob a lógica antiga, levando a dados incorretos ou indexação com falha. +- **Implantações na The Graph Network:** Enxertos não são recomendados para subgraphs destinados à rede descentralizada (mainnet) do The Graph. Um enxerto pode complicar a indexação e pode não ser totalmente apoiado por todos os Indexers, o que pode causar comportamento inesperado ou aumento de custos. Para implantações de mainnet, é mais seguro recomeçar a indexação do subgraph do zero, para garantir total compatibilidade e confiabilidade. -### Risk Management +### **Controle de Riscos** -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. +- **Integridade de Dados**: Números de bloco incorretos podem causar perda ou duplicação de dados. +- **Testes**: Sempre experimente enxertos num ambiente de testes antes de lançá-los para produção. ## Conclusão -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: +O enxerto é uma estratégia eficaz para implantar hotfixes no desenvolvimento de subgraphs, e ainda permite: -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. +- **Se recuperar rapidamente** de erros críticos sem recomeçar a indexação. +- **Preservar dados históricos**, mantendo a continuidade tanto para aplicativos quanto para utilizadores. +- **Garantir a disponibilidade do serviço** ao minimizar o tempo de espera em períodos importantes de manutenção. -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. +No entanto, é importante usar enxertos com cuidado e seguir as melhores práticas para controlar riscos. Após estabilizar o seu subgraph com o hotfix, planeie a implantação de uma versão não enxertada para garantir a estabilidade a longo prazo. ## Outros Recursos -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. +- **[Documentação de Enxertos](/subgraphs/cookbook/grafting/)**: Substitua um Contrato e Mantenha o Seu Histórico com Enxertos +- **[Como Entender IDs de Implantação](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Aprenda a diferença entre ID de Implantação e ID de Subgraph. -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. +Ao incorporar enxertos ao seu fluxo de programação de subgraphs, é possível melhorar a sua capacidade de responder a problemas, garantindo que os seus serviços de dados permaneçam robustos e confiáveis. -## Subgraph Best Practices 1-6 +## Melhores Práticas para um Subgraph 1 – 6 -1. 
[Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Pruning: Reduza o Excesso de Dados do Seu Subgraph para Acelerar Queries](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Use o @derivedFrom para Melhorar a Resposta da Indexação e de Queries](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Melhore o Desempenho da Indexação e de Queries com o Uso de Bytes como IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Evite `eth-calls` para Acelerar a Indexação](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplifique e Otimize com Séries Temporais e Agregações](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Lance Hotfixes Mais Rápido com Enxertos](/subgraphs/cookbook/grafting-hotfix/) From 7ee33ee716a1770fdcd984503d370b40ca2251a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:49 -0500 Subject: [PATCH 0646/1534] New translations grafting-hotfix.mdx (Russian) --- .../ru/subgraphs/cookbook/grafting-hotfix.mdx | 165 +++++++++--------- 1 file changed, 83 insertions(+), 82 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ru/subgraphs/cookbook/grafting-hotfix.mdx index cfa312c965c4..0108d8b3cbf7 100644 --- a/website/src/pages/ru/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,64 +1,65 @@ --- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +title: Лучшая практика субграфов 6 — используйте графтинг для быстрого развертывания исправлений +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- -## TLDR +## Краткое содержание -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. +Графтинг — это мощная функция в разработке субграфов, которая позволяет создавать и разворачивать новые субграфы, повторно используя индексированные данные из существующих. ### Обзор -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. +Эта функция позволяет быстро развертывать исправления для критических ошибок, устраняя необходимость повторного индексирования всего субграфа с нуля. Сохраняя исторические данные, графтинг минимизирует время простоя и обеспечивает непрерывность работы сервисов данных. -## Benefits of Grafting for Hotfixes +## Преимущества графтинга для оперативных исправлений -1. **Rapid Deployment** +1. **Быстрое развертывание** - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. 
+ - **Минимизация времени простоя**: когда субграф сталкивается с критической ошибкой и перестает индексировать данные, графтинг позволяет немедленно развернуть исправление без необходимости ждать повторного индексирования. + - **Немедленное восстановление**: новый субграф продолжается с последнего индексированного блока, обеспечивая бесперебойную работу служб передачи данных. -2. **Data Preservation** +2. **Сохранение данных** - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + - **Повторное использование исторических данных**: графтинг копирует существующие данные из базового субграфа, что позволяет сохранить важные исторические записи. + - **Консистентность**: поддерживает непрерывность данных, что имеет решающее значение для приложений, полагающихся на согласованные исторические данные. -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. +3. **Эффективность** + - **Экономия времени и ресурсов**: избегает вычислительных затрат на повторное индексирование больших объемов данных. + - **Фокус на исправлениях**: позволяет разработчикам сосредоточиться на решении проблем, а не на восстановлении данных. -## Best Practices When Using Grafting for Hotfixes +## Лучшие практики при использовании графтинга для оперативных исправлений -1. **Initial Deployment Without Grafting** +1. **Первоначальное развертывание без графтинга** - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + - **Начните с чистого листа**: Всегда разворчивайте первоначальный субграф без использования графтинга, чтобы убедиться в его стабильности и корректной работе. + - **Тщательно тестируйте**: проверьте производительность субграфа, чтобы свести к минимуму необходимость в будущих исправлениях. -2. **Implementing the Hotfix with Grafting** +2. **Реализация исправления с использованием графтинга** - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + - **Определите проблему**: при возникновении критической ошибки определите номер блока последнего успешно проиндексированного события. + - **Создайте новый субграф**: разработайте новый субграф, включающий оперативное исправление. + - **Настройте графтинг**: используйте графтинг для копирования данных до определенного номера блока из неисправного субграфа. + - **Быстро разверните**: опубликуйте графтинговый (перенесенный) субграф, чтобы как можно скорее восстановить работу сервиса. -3. **Post-Hotfix Actions** +3. **Действия после оперативного исправления** - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. 
- - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + - **Мониторинг производительности**: убедитесь, что графтинговый (перенесенный) субграф индексируется правильно и исправление решает проблему. + - **Публикация без графтинга**: как только субграф стабилизируется, разверните его новую версию без использования графтинга для долгосрочного обслуживания. + > Примечание: Не рекомендуется использовать графтинг бесконечно, так как это может усложнить будущие обновления и обслуживание. + - **Обновите ссылки**: перенаправьте все сервисы или приложения на новый субграф без использования графтинга. -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. +4. **Важные замечания** + - **Тщательный выбор блока**: тщательно выбирайте номер блока графтинга, чтобы избежать потери данных. + - **Совет**: используйте номер блока последнего корректно обработанного события. + - **Используйте идентификатор развертывания**: убедитесь, что Вы ссылаетесь на идентификатор развертывания базового субграфа, а не на идентификатор субграфа. + - **Примечание**: идентификатор развертывания — это уникальный идентификатор для конкретного развертывания субграфа. + - **Объявление функции**: не забудьте указать использование графтинга в манифесте субграфа в разделе функций. -## Example: Deploying a Hotfix with Grafting +## Пример: развертывание оперативного исправления с использованием графтинга -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. +Предположим, у вас есть субграф, отслеживающий смарт-контракт, который перестал индексироваться из-за критической ошибки. Вот как Вы можете использовать графтинг для развертывания оперативного исправления. -1. **Failed Subgraph Manifest (subgraph.yaml)** +1. **Манифест неудачного субграфа (subgraph.yaml)** ```yaml specVersion: 1.0.0 @@ -87,7 +88,7 @@ Suppose you have a subgraph tracking a smart contract that has stopped indexing file: ./src/old-lock.ts ``` -2. **New Grafted Subgraph Manifest (subgraph.yaml)** +2. 
**Манифест нового субграфа с графтингом (subgraph.yaml)** ```yaml specVersion: 1.0.0 schema: @@ -99,7 +100,7 @@ Suppose you have a subgraph tracking a smart contract that has stopped indexing source: address: '0xNewContractAddress' abi: Lock - startBlock: 6000001 # Block after the last indexed block + startBlock: 6000001 # Блок после последнего индексированного блока mapping: kind: ethereum/events apiVersion: 0.0.7 @@ -116,71 +117,71 @@ Suppose you have a subgraph tracking a smart contract that has stopped indexing features: - grafting graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block + base: QmBaseDeploymentID # ID развертывания неудачного субграфа + block: 6000000 # Последний успешно индексированный блок ``` -**Explanation:** +**Пояснение:** -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. +- **Обновление источника данных**: новый субграф указывает на 0xNewContractAddress, который может быть исправленной версией смарт-контракта. +- **Начальный блок**: устанавливается на один блок после последнего успешно индексированного блока, чтобы избежать повторной обработки ошибки. +- **Конфигурация графтинга**: + - **base**: идентификатор развертывания неудачного субграфа. + - **block**: номер блока, с которого должен начаться графтинг. -3. **Deployment Steps** +3. **Шаги развертывания** - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. + - **Обновите код**: внедрите исправление в свои скрипты мэппинга (например, handleWithdrawal). + - **Отредактируйте манифест**: как показано выше, обновите файл `subgraph.yaml` с конфигурациями для графтинга. + - **Разверните субграф**: + - Аутентифицируйтесь с помощью Graph CLI. + - Разверните новый субграф используя `graph deploy`. -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. +4. **После развертывания** + - **Проверьте индексирование**: убедитесь, что субграф корректно индексирует данные с точки графтинга. + - **Следите за данными**: убедитесь, что новые данные индексируются и что исправление работает эффективно. + - **Запланируйте повторную публикацию**: запланируйте развертывание версии без графтинга для обеспечения долгосрочной стабильности. -## Warnings and Cautions +## Предупреждения и предостережения -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. +Хотя графтинг является мощным инструментом для быстрого развертывания исправлений, существуют конкретные сценарии, когда его следует избегать для поддержания целостности данных и обеспечения оптимальной производительности. 
-- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. +- **Несовместимые изменения схемы**: если ваше исправление требует изменения типа существующих полей или удаления полей из схемы, графтинг не подходит. Графтинг предусматривает, что схема нового субграфа будет совместима со схемой базового субграфа. Несовместимые изменения могут привести к несоответствиям данных и ошибкам, так как существующие данные не будут соответствовать новой схеме. +- **Значительные изменения логики мэппинга**: когда исправление включает существенные изменения в вашей логике мэппинга, такие как изменение обработки событий или изменение функций обработчиков, графтинг может работать некорректно. Новая логика может быть несовместима с данными, обработанными по старой логике, что приведет к некорректным данным или сбоям в индексировании. +- **Развертывания в сеть The Graph**: графтинг не рекомендуется для субграфов, предназначенных для децентрализованной сети The Graph (майннет). Это может усложнить индексирование и не поддерживаться всеми Индексаторами, что может привести к непредсказуемому поведению или увеличению затрат. Для развертываний в майннете безопаснее перезапустить индексирование субграфа с нуля, чтобы обеспечить полную совместимость и надежность. -### Risk Management +### Управление рисками -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. +- **Целостность данных**: неверно указанные номера блоков могут привести к потере данных или их дублированию. +- **Тестирование**: всегда тестируйте графтинг в среде разработки перед развертыванием в рабочей среде. -## Conclusion +## Заключение -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: +Графтинг — это эффективная стратегия для развертывания оперативных исправлений в разработке субграфов, позволяющая Вам: -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. +- **Быстро восстанавливаться** после критических ошибок без повторного индексирования. +- **Сохранять исторические данные**, поддерживая непрерывности работы для приложений и пользователей. +- **Обеспечить доступность сервиса**, минимизируя время простоя при критических исправлениях. 
-However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. +Однако важно использовать графтинг разумно и следовать лучшим практикам для снижения рисков. После стабилизации своего субграфа с помощью оперативных исправлений, спланируйте развертывание версии без графтинга для обеспечения долгосрочного обслуживания. -## Дополнительные источники +## Дополнительные ресурсы -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. +- **[Документация графтинга](/subgraphs/cookbook/grafting/)**: замените контракт и сохраните его историю с помощью графтинга +- **[Понимание идентификаторов развертывания](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: ознакомьтесь с разницей между идентификатором развертывания и идентификатором субграфа. -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. +Включив графтинг в процесс разработки субграфов, Вы сможете быстрее реагировать на проблемы, обеспечивая стабильность и надежность Ваших сервисов данных. -## Subgraph Best Practices 1-6 +## Лучшие практики для субграфов 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Увеличение скорости запросов с помощью обрезки субграфов](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Улучшение индексирования и отклика запросов с использованием @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Улучшение индексирования и производительности запросов с использованием неизменяемых объектов и байтов в качестве идентификаторов](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Увеличение скорости индексирования путем избегания `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Упрощение и оптимизация с помощью временных рядов и агрегаций](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Использование переноса (графтинга) для быстрого развертывания исправлений](/subgraphs/cookbook/grafting-hotfix/) From 08bd958e61b2f72a70d800b6762a4a4f24552c67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:50 -0500 Subject: [PATCH 0647/1534] New translations grafting-hotfix.mdx (Swedish) --- website/src/pages/sv/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/sv/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/sv/subgraphs/cookbook/grafting-hotfix.mdx index cd68f5b32a38..a6fec74aa61a 100644 --- a/website/src/pages/sv/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 5a34f9bdbe3bdfc20f0ec5870da5637249dc1fd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:51 -0500 Subject: [PATCH 0648/1534] New translations grafting-hotfix.mdx (Turkish) --- .../tr/subgraphs/cookbook/grafting-hotfix.mdx | 163 +++++++++--------- 1 file changed, 82 insertions(+), 81 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/tr/subgraphs/cookbook/grafting-hotfix.mdx index 1169b1e2b4eb..b30a8b6ecf5d 100644 --- a/website/src/pages/tr/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,64 +1,65 @@ --- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +title: Subgraph Örnek Uygulama 6 - Acil Güncelleme Dağıtımı için Aşılama Kullanın +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- -## TLDR +## Özet -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. +Aşılama, mevcut endekslenmiş verileri yeniden kullanarak yeni subgraph'ler oluşturmanıza ve dağıtmanıza olanak tanıyan güçlü bir özelliktir. ### Genel Bakış -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. +Bu özellik, kritik sorunlar için hızlı bir şekilde düzeltmelerin dağıtılmasını sağlar ve tüm subgraph'i baştan endeksleme ihtiyacını ortadan kaldırır. Aşılama, tarihsel verileri koruyarak kesinti sürelerini en aza indirir ve veri hizmetlerinde süreklilik sağlar. -## Benefits of Grafting for Hotfixes +## Acil Güncellemelerde Aşılamanın Avantajları -1. **Rapid Deployment** +1. **Hızlı Dağıtım** - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + - **Kesinti Süresini En Aza İndirme**: Bir subgraph kritik bir hata ile karşılaştığında ve endekslemeyi durdurduğunda, aşılama sayesinde yeniden endekslemeyi beklemeden hemen bir düzeltme dağıtabilirsiniz. + - **Hızlıca Kurtarma**: Yeni subgraph, son endekslenmiş bloktan devam eder ve veri hizmetlerinin kesintisiz olmasını sağlar. -2. **Data Preservation** +2. 
**Veri Koruma** - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + - **Tarihsel Verileri Yeniden Kullan**: Aşılama, temel subgraph'ten mevcut verileri kopyalar, böylece değerli tarihsel kayıtları kaybetmezsiniz. + - **Tutarlılık**: Tutarlı tarihsel verilere bağımlı uygulamalar için veri sürekliliğini sağlar. -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. +3. **Verimlilik** + - **Zaman ve Kaynak Tasarrufu**: Büyük veri kümelerinin yeniden endekslenmesinden kaynaklı hesaplama yükünü önler. + - **Hatalara Odaklanma**: Geliştiricilerin veri kurtarma yönetimi yerine sorunları çözmeye odaklanmalarını sağlar. -## Best Practices When Using Grafting for Hotfixes +## Acil Güncellemeler için Aşılama Kullanmak - Örnek Uygulamalar -1. **Initial Deployment Without Grafting** +1. **Aşılama Olmadan Başlangıç Dağıtımı** - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + - **Sıfırdan Başlamak**: Subgraph'inizin ilk halini her zaman aşılama olmadan dağıtarak, stabil ve beklendiği gibi çalışmasını sağlayın. + - **Detaylı Test**: Gelecekte acil güncelleme yapmayı en aza indirmek için subgraph'in performansını doğrulayın. -2. **Implementing the Hotfix with Grafting** +2. **Aşılama ile Acil Güncelleme Yapmak** - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + - **Sorunu Belirleme**: Kritik bir hata oluştuğunda, son başarılı endekslenmiş olayın blok numarasını belirleyin. + - **Yeni Bir Subgraph Oluşturma**: Acil güncellemeyi içeren yeni bir subgraph geliştirin. + - **Aşılamayı Yapılandırma**: Dağıtılamamış subgraph'ten belirlenen blok numarasına kadar olan verileri kopyalamak için aşılama kullanın. + - **Hızlı Dağıtım**: Hizmeti en kısa sürede yeniden başlatmak için aşılanmış subgraph'i ağda yayımlayın. -3. **Post-Hotfix Actions** +3. **Acil Güncelleme Sonrasındaki Aksiyonlar** - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + - **Performansı Takip Etme**: Aşılanmış subgraph'in doğru şekilde endekslendiğinden ve acil güncellemenin sorunu çözdüğünden emin olun. + - **Aşılamadan Yeniden Yayımlama**: Stabil olduktan sonra, uzun vadede sürdürülebilirlik için, yeni bir subgraph versiyonunu aşılamadan dağıtın. + > Not: Aşılamaya süresiz olarak güvenmek önerilmez. 
Bu durum gelecekteki güncellemeleri ve bakımı karmaşık hale getirebilir. + - **Referansları Güncelleyin**: Bütün hizmetleri ve uygulamaları yeni, aşılanmamış subgraph'i kullanacak şekilde yönlendirin. -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. +4. **Önemli Hususlar** + - **Bloku Dikkatli Seçimek**: Veri kaybını önlemek için aşılama blok numarasını dikkatli seçin. + - **İpucu**: Doğru işlenmiş son olayın blok numarasını kullanın. + - **Dağıtım ID'sini Kullanın**: Subgraph ID'si yerine temel subgraph'in Dağıtım ID'sine referans verdiğinizden emin olun. + - **Not**: Dağıtım Kimliği, belirli bir subgraph dağıtımı için benzersiz bir tanımlayıcıdır. + - **Özellik Deklarasyonu**: Subgraph manifestosunda özellikler altında aşılamayı deklare etmeyi unutmayın. -## Example: Deploying a Hotfix with Grafting +## Örnek: Aşılama ile Bir Acil Güncelleme Dağıtmak -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. +Bir akıllı sözleşmeyi takip eden ve kritik bir hata nedeniyle endekslemeyi durdurmuş bir subgraph'e sahip olduğunuzu varsayalım. Bu durumda acil güncelleme dağıtmak için aşılamayı nasıl kullanabileceğiniz aşağıda açıklanmıştır. -1. **Failed Subgraph Manifest (subgraph.yaml)** +1. **Hata Veren Subgraph Manifestosu (subgraph.yaml)** ```yaml specVersion: 1.0.0 @@ -87,7 +88,7 @@ Suppose you have a subgraph tracking a smart contract that has stopped indexing file: ./src/old-lock.ts ``` -2. **New Grafted Subgraph Manifest (subgraph.yaml)** +2. **Yeni Aşılanmış Subgraph Manifestosu (subgraph.yaml)** ```yaml specVersion: 1.0.0 schema: @@ -99,7 +100,7 @@ Suppose you have a subgraph tracking a smart contract that has stopped indexing source: address: '0xNewContractAddress' abi: Lock - startBlock: 6000001 # Block after the last indexed block + startBlock: 6000001 # # Son endekslenmiş bloktan sonraki blok mapping: kind: ethereum/events apiVersion: 0.0.7 @@ -116,71 +117,71 @@ Suppose you have a subgraph tracking a smart contract that has stopped indexing features: - grafting graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block + base: QmBaseDeploymentID # Başarısız subgraph'in Dağıtım Kimliği + block: 6000000 # Başarıyla endekslenmiş son blok ``` -**Explanation:** +**Açıklama:** -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. +- **Veri Kaynağı Güncellemesi**: Yeni subgraph, akıllı sözleşmenin düzeltilmiş bir versiyonu olabilecek 0xNewContractAddress adresine işaret etmektedir. +- **Başlangıç Bloğu**: Hatanın tekrar işlenmesini önlemek için başarıyla endekslenmiş son bloktan bir blok sonraya ayarlayın. 
+- **Aşılama Yapılandırması**: + - **base**: Başarısız olan subgraph'in Dağıtım Kimliği. + - **block**: Aşılama işleminin başlaması gereken blok numarası. -3. **Deployment Steps** +3. **Dağıtım Adımları** - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. + - **Kodu Güncelleyin**: eşleme kodlarınıza (örneğin, handleWithdrawal kısmına) acil güncelleme uygulayın. + - **Manifestoyu Ayarlayın**: Yukarıda gösterildiği gibi, `subgraph.yaml` dosyasını aşılama yapılandırmalarıyla güncelleyin. + - **Subgraph'i Dağıtın**: + - Graph CLI ile kimlik doğrulaması yapın. + - `graph deploy` komutunu kullanarak yeni subgraph'i dağıtın. -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. +4. **Dağıtım Sonrası** + - **Endekslemeyi Doğrulama**: Subgraph'in aşılanma noktasından itibaren doğru endekslendiğinden emin olun. + - **Veriyi Takip Etme**: Yeni verilerin yakalandığından ve acil güncellemenin etkili olduğundan emin olun. + - **Yeniden Yayımlama İçin Planlama**: Uzun süreli istikrar için aşılama yapılmamış sürümün dağıtımını planlayın. -## Warnings and Cautions +## Uyarılar ve Dikkat Edilmesi Gerekenler -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. +Aşılama, acil güncellemeleri hızlı bir şekilde dağıtmayı sağlayan güçlü bir araçtır. Fakat veri bütünlüğünü korumak ve ideal performansı sağlamak için aşılanma kullanımından kaçınılması gereken belirli durumlar vardır. -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. +- **Uyumsuz Şema Değişiklikleri**: Acil güncelleme mevcut alanların türünü değiştirmeyi veya şemanızdan alanları kaldırmayı gerektiriyorsa, bu durumda aşılama uygun değildir. Aşılama, yeni subgraph'in şemasının temel subgraph'in şemasıyla uyumlu olmasını bekler. Uyumsuz değişiklikler, mevcut verilerin yeni şemayla uyumlu olmaması nedeniyle veri tutarsızlıklarına ve hatalara neden olabilir. 
+- **Önemli Eşlem Mantığı Revizyonları**: Acil güncelleme olayların işlenme şeklinin değiştirilmesi veya işleyici fonksiyonlarının değiştirilmesi gibi eşlem mantığınızda önemli değişiklikleri içeriyorsa, aşılama doğru çalışmayabilir. Buradaki yeni mantık, eski mantık altında işlenmiş verilerle uyumlu olmayabilir. Bu da hatalı verilere veya başarısız endekslemelere yol açabilir. +- **The Graph Ağına Dağıtımlar**: Aşılama, The Graph'in merkeziyetsiz ağı (ana ağ) için tasarlanmış subgraph'ler için önerilmez. Aşılama endekslemeyi karmaşıklaştırabilir ve tüm endeksleyiciler tarafından tamamen desteklenmeyebilir. Bu yüzden beklenmedik davranışlara veya artan maliyetlere neden olabilir. Ana ağ dağıtımları için, tam uyumluluk ve güvenilirliği sağlamak amacıyla subgraph'i en baştan, tekrar endekslemek daha güvenlidir. -### Risk Management +### Risk Yönetimi -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. +- **Veri Bütünlüğü**: Yanlış blok numaraları veri kaybına veya yinelenmelere yol açabilir. +- **Test Etme**: Aşılamayı daima önce geliştirme ortamında test ettikten sonra üretime dağıtın. -## Conclusion +## Sonuç -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: +Aşılama, subgraph geliştirme sürecinde acil düzeltmeleri dağıtmak için etkili bir stratejidir. Bu strateji, aşağıdakileri yapmanızı sağlar: -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. +- Yeniden endeksleme yapmadan kritik hatalardan **Hızla Kurtulun**. +- Uygulamalar ve kullanıcılar için sürekliliği koruyarak **Tarihsel Verileri Koruyun**. +- Kritik düzeltmeler sırasında kesinti sürelerini en aza indirerek **Hizmet Erişilebilirliğini Sağlayın**. -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. +Ancak, aşılamayı tedbirli bir şekilde kullanmak ve riskleri azaltmak için örnek uygulamaları takip etmek önemlidir. Subgraph'inizi acil düzeltmeyle stabilize ettikten sonra, uzun vadede çalışmasını sağlamak için aşılama kullanılmayan bir sürüm dağıtmayı planlayın. ## Ek Kaynaklar -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. +- **[Aşılama Dokümantasyonu](/subgraphs/cookbook/grafting/)**: Aşılama ile Bir Sözleşmeyi Değiştirin ve Geçmişini Koruyun +- **[Dağıtım Kimliklerini Anlamak](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Dağıtım Kimliği ile Subgraph Kimliği arasındaki farkı öğrenin. -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. +Subgraph geliştirme iş akışınıza aşılamayı dahil ederek, sorunlara hızla yanıt verme yeteneğinizi artırabilir ve veri hizmetlerinizin sağlam ve güvenilir kalmasını sağlayabilirsiniz. -## Subgraph Best Practices 1-6 +## Subgraph Örnek Uygulamalar 1-6 -1. 
[Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Subgraph Budama ile Sorgu Hızını İyileştirin](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [@derivedFrom Kullanarak Endeksleme ve Sorgu Yanıt Hızını Artırın](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Değişmez Varlıklar ve Bytes ID'ler Kullanarak Endeksleme ve Sorgu Performansını Artırın](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Endeksleme Hızını `eth_calls`'den Kaçınarak İyileştirin](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Zaman Serileri ve Bütünleştirme ile Basitleştirin ve Optimize Edin](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Hızlı Düzeltme Dağıtımı için Aşılama Kullanın](/subgraphs/cookbook/grafting-hotfix/) From a2334a6e08f0f76b8d9cf8fa9c1312095792892a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:52 -0500 Subject: [PATCH 0649/1534] New translations grafting-hotfix.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/cookbook/grafting-hotfix.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/uk/subgraphs/cookbook/grafting-hotfix.mdx index a0bd3f4ab1c2..e74fa10fab97 100644 --- a/website/src/pages/uk/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR @@ -164,7 +165,7 @@ Grafting is an effective strategy for deploying hotfixes in subgraph development However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. -## Additional Resources +## Додаткові матеріали - **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting - **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. 
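The grafting-hotfix guide translated in the patches above tells readers to find two values before configuring a graft: the block number of the last successfully indexed event, and the Deployment ID (not the Subgraph ID) of the failed subgraph. One way to look both up, not shown in the guide itself, is graph-node's indexing-status API. The sketch below assumes a Studio-style index-node endpoint and a hypothetical `org/graft-example` subgraph name; adjust both for a real deployment.

```graphql
# Run against a graph-node index-node endpoint (the URL is deployment-specific).
# "org/graft-example" is a placeholder subgraph name.
{
  indexingStatusForCurrentVersion(subgraphName: "org/graft-example") {
    subgraph # Deployment ID (Qm...), the value to use for graft.base
    health # "failed" when a deterministic error has halted indexing
    fatalError {
      message
      block {
        number # first bad block; graft.block must be strictly below this
      }
    }
    chains {
      latestBlock {
        number # last block actually indexed, a safe graft.block candidate
      }
    }
  }
}
```

Under these assumptions, the `subgraph` field and `chains.latestBlock.number` map directly onto the `base` and `block` keys of the `graft` stanza shown in the manifests above.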
From 31495722ca87509f0c2aeaee2036f1d540f72323 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:53 -0500 Subject: [PATCH 0650/1534] New translations grafting-hotfix.mdx (Chinese Simplified) --- website/src/pages/zh/subgraphs/cookbook/grafting-hotfix.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/zh/subgraphs/cookbook/grafting-hotfix.mdx index 8153e7143816..f0aa99760893 100644 --- a/website/src/pages/zh/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +title: 子图最佳实践6-使用嫁接快速部署修补程序 +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 5d41ac4d569f843f0b6dd4a71de4516e78f9936e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:54 -0500 Subject: [PATCH 0651/1534] New translations grafting-hotfix.mdx (Urdu (Pakistan)) --- website/src/pages/ur/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ur/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ur/subgraphs/cookbook/grafting-hotfix.mdx index 780d7ad3f827..f7b67fff6dbb 100644 --- a/website/src/pages/ur/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 759d47a3bbd21cb51b2ed37d893fb709ddba214d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:55 -0500 Subject: [PATCH 0652/1534] New translations grafting-hotfix.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/cookbook/grafting-hotfix.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/vi/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/vi/subgraphs/cookbook/grafting-hotfix.mdx index ddf32cc35aa6..240604aeffc6 100644 --- a/website/src/pages/vi/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR From 108ccd46b44907d7af232fe0a06cb6b4af006e10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:56 -0500 Subject: [PATCH 0653/1534] New translations grafting-hotfix.mdx (Marathi) --- website/src/pages/mr/subgraphs/cookbook/grafting-hotfix.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/mr/subgraphs/cookbook/grafting-hotfix.mdx index 871f930abd43..84bab57d46dc 100644 --- a/website/src/pages/mr/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR @@ -164,7 +165,7 @@ Grafting is an effective strategy for deploying hotfixes in subgraph development However, it’s important to use grafting judiciously and follow best practices to mitigate risks. 
After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. -## Additional Resources +## अतिरिक्त संसाधने - **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting - **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. From ee4f96ab70d986ef53cbf8989a2c481cc1521212 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:57 -0500 Subject: [PATCH 0654/1534] New translations grafting-hotfix.mdx (Hindi) --- .../src/pages/hi/subgraphs/cookbook/grafting-hotfix.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/hi/subgraphs/cookbook/grafting-hotfix.mdx index 2bf58c320368..424f870521dc 100644 --- a/website/src/pages/hi/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: "Subgraph Best Practice 6: Grafting and Hotfixing" --- ## TLDR @@ -17,14 +18,14 @@ This feature enables quick deployment of hotfixes for critical issues, eliminati - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. -2. **Data Preservation** +2. **डेटा प्रिजर्वेशन** - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. 3. **Efficiency** - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. + - **कृपया सुधारों पर ध्यान दें**: डेवलपर्स को डेटा रिकवरी प्रबंधन करने के बजाय समस्याओं को हल करने पर ध्यान केंद्रित करने की अनुमति देता है।" ## Best Practices When Using Grafting for Hotfixes @@ -164,7 +165,7 @@ Grafting is an effective strategy for deploying hotfixes in subgraph development However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. -## अतिरिक्त संसाधन +## Additional Resources - **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting - **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. 
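The grafting.mdx patches that follow verify a graft by re-running the tutorial's `withdrawals` query and checking that pre-graft and post-graft events appear together. A slightly stricter check is to pin one query to a block height with The Graph's per-field `block` argument. This sketch reuses the tutorial's entity and field names and its graft block of 5956000; both are assumptions to replace for any other subgraph.

```graphql
{
  # State as of the graft block itself: entities copied from the base subgraph
  copiedHistory: withdrawals(first: 5, block: { number: 5956000 }) {
    id
    amount
    when
  }
  # Current state: should additionally include events indexed after the graft
  latest: withdrawals(first: 5, orderBy: when, orderDirection: desc) {
    id
    amount
    when
  }
}
```

If `copiedHistory` returns the old contract's withdrawals while `latest` also shows the new contract's, the `base` and `block` values were set correctly; an empty `copiedHistory` usually points at a wrong Deployment ID or graft block.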
From dec04e80c791776b00950d8d0be34c6f3bf754e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:55:58 -0500 Subject: [PATCH 0655/1534] New translations grafting.mdx (French) --- .../pages/fr/subgraphs/cookbook/grafting.mdx | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/grafting.mdx b/website/src/pages/fr/subgraphs/cookbook/grafting.mdx index dd8dd3f5223a..a81cf0ddf30a 100644 --- a/website/src/pages/fr/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/grafting.mdx @@ -20,9 +20,9 @@ Le subgraph greffé peut utiliser un schema GraphQL qui n'est pas identique à c Pour plus d’informations, vous pouvez vérifier : -- [Greffage](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +Dans ce tutoriel, nous couvrirons un cas d'utilisation de base. Nous remplacerons un contrat existant par un contrat identique (avec une nouvelle adresse, mais le même code). Ensuite, nous grefferons le subgraph existant sur le subgraph "de base" qui suit le nouveau contrat. ## Remarque importante sur le greffage lors de la mise à niveau vers le réseau @@ -30,13 +30,13 @@ In this tutorial, we will be covering a basic use case. We will replace an exist ### Pourquoi est-ce important? -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. +Le greffage est une fonctionnalité puissante qui vous permet de "greffer" un subgraph sur un autre, transférant efficacement les données historiques du subgraph existant vers une nouvelle version. Il n'est pas possible de greffer un subgraph de The Graph Network vers Subgraph Studio. ### Les meilleures pratiques -**Migration initiale** : lorsque vous déployez pour la première fois votre subgraph sur le réseau décentralisé, faites-le sans greffe. Assurez-vous que le subgraph est stable et fonctionne comme prévu. +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. -**Mises à jour ultérieures** : une fois que votre subgraph est actif et stable sur le réseau décentralisé, vous pouvez utiliser le greffage pour les versions futures afin de rendre la transition plus fluide et de préserver les données historiques. +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. En respectant ces lignes directrices, vous minimisez les risques et vous vous assurez que le processus de migration se déroule sans heurts. @@ -44,13 +44,13 @@ En respectant ces lignes directrices, vous minimisez les risques et vous vous as Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). 
To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: -- [Exemple de subgraph repo](https://github.com/Shiyasmohd/grafting-tutorial) +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Remarque : le contrat utilisé dans le subgraph a été tiré de ce [kit de démarrage pour hackathon](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Définition du manifeste du subgraph -Le manifeste du subgraph `subgraph.yaml` identifie les sources de données pour le subgraph, les déclencheurs d'intérêt et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Vous trouverez ci-dessous un exemple de manifeste de subgraph que vous pourrez utiliser : +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -79,9 +79,9 @@ dataSources: file: ./src/lock.ts ``` -- La source de données `Lock` est l'adresse abi et le contrat que nous obtiendrons lorsque nous compilerons et déploierons le contrat +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract - The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` -- La section `mapping` définit les déclencheurs d'intérêt et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Dans ce cas, nous écoutons l'événement `Withdrawal` et appelons la fonction `handleWithdrawal` lorsqu'elle est émise. +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. ## Définition de manifeste de greffage @@ -90,21 +90,21 @@ Le greffage nécessite l'ajout de deux nouveaux éléments au manifeste du subgr ```yaml --- features: - - grafting # feature name + - grafting # nom de la fonctionnalité graft: - base: Qm... # subgraph ID of base subgraph - block: 5956000 # block number + base: Qm... # ID du subgraph de base + block: 5956000 # numéro du bloc ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft :` est une carte du subgraph `base` et du bloc sur lequel greffer. Le `block` est le numéro de bloc à partir duquel commencer l'indexation. Le graph copiera les données du subgraph de base jusqu'au bloc donné inclus, puis continuera à indexer le nouveau subgraph à partir de ce bloc. +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. -Les valeurs de `base` et de `bloc` peuvent être trouvées en déployant deux subgraphs : un pour l'indexation de base et un avec la méthode du greffage +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting ## Déploiement du subgraph de base 1. 
Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Suivez les instructions de la section `AUTH& DEPLOY` sur la page de votre subgraph dans le dossier `graft-example` du dépôt +2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo 3. Une fois terminé, vérifiez que le subgraph s'indexe correctement. Si vous exécutez la commande suivante dans The Graph Playground ```graphql @@ -146,8 +146,8 @@ Le subgraph.yaml de remplacement du greffon aura une nouvelle adresse de contrat 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` 2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Suivez les instructions de la section `AUTH& DEPLOY` sur la page de votre subgraph dans le dossier `graft-replacement` du répertoire -4. Une fois cette opération terminée, vérifiez que le subgraph est correctement indexé. Si vous exécutez la commande suivante dans The Graph Playground +3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo +4. Une fois terminé, vérifiez que le subgraph s'indexe correctement. Si vous exécutez la commande suivante dans The Graph Playground ```graphql { @@ -187,11 +187,11 @@ Le résultat devrait être le suivant : You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. -Congrats! You have successfully grafted a subgraph onto another subgraph. +Félicitations ! Vous avez réussi à greffer un subgraph sur un autre subgraph. -## Ressources complémentaires +## Ressources supplémentaires -If you want more experience with grafting, here are a few examples for popular contracts: +Si vous souhaitez acquérir plus d'expérience avec le greffage, voici quelques exemples pour des contrats populaires : - [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml) - [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) @@ -199,4 +199,4 @@ If you want more experience with grafting, here are a few examples for popular c To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. 
Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results -> Notez : Une grande partie de cet article a été reprise de l'[article Arweave](/subgraphs/cookbook/arweave/) publié précédemment +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From 62aac48f1f233d58fcffcd8b8ca73d943505a744 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:00 -0500 Subject: [PATCH 0656/1534] New translations grafting.mdx (Spanish) --- .../pages/es/subgraphs/cookbook/grafting.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/grafting.mdx b/website/src/pages/es/subgraphs/cookbook/grafting.mdx index ddd57394fdab..4a98c7ab352b 100644 --- a/website/src/pages/es/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/es/subgraphs/cookbook/grafting.mdx @@ -44,13 +44,13 @@ By adhering to these guidelines, you minimize risks and ensure a smoother migrat Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: -- [Repo de ejemplo de subgrafo](https://github.com/Shiyasmohd/grafting-tutorial) +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Nota: El contrato utilizado en el subgrafo fue tomado del siguiente [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). -## Definición del manifiesto del subgrafo +## Definición de manifiesto del subgrafo -El manifiesto del subgrafo `subgraph.yaml` identifica las fuentes de datos para el subgrafo, los disparadores de interés y las funciones que deben ejecutarse en respuesta a esos disparadores. A continuación se muestra un ejemplo de manifiesto de subgrafos que se utilizará: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -79,9 +79,9 @@ dataSources: file: ./src/lock.ts ``` -- La fuente de datos de `Lock` es el ABI y la dirección del contrato que obtendremos cuando compilemos y realicemos el deploy del contrato +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract - The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` -- La sección de `mapeo` define los disparadores de interés y las funciones que deben ejecutarse en respuesta a esos disparadores. En este caso, estamos escuchando el evento `Withdrawal` y llamando a la función `handleWithdrawal` cuando se emite. +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. ## Definición del manifiesto de grafting @@ -97,14 +97,14 @@ graft: ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). 
-- `graft`: es un mapa del subgrafo `base` y del bloque al que se va a realizar el grafting. El `bloque` es el número de bloque desde el que se empieza a indexar. The Graph copiará los datos del subgrafo base hasta el bloque dado, inclusive, y luego continuará indexando el nuevo subgrafo a partir de ese bloque. +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. -Los valores de la `base` y del `bloque` se pueden encontrar deployando dos subgrafos: uno para la indexación de la base y otro con grafting +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting ## Deploy del subgrafo base 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Sigue las instrucciones de la sección `AUTH & DEPLOY` en tu página de subgrafo en la carpeta `graft-example` del repo +2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo 3. Una vez que hayas terminado, verifica que el subgrafo se está indexando correctamente. Si ejecutas el siguiente comando en The Graph Playground ```graphql @@ -146,7 +146,7 @@ El subgraph.yaml de sustitución del graft tendrá una nueva dirección de contr 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` 2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Sigue las instrucciones de la sección `AUTH & DEPLOY` en la página de tu subgrafo en la carpeta `graft-replacement` del repo +3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo 4. Una vez que hayas terminado, verifica que el subgrafo se está indexando correctamente. Si ejecutas el siguiente comando en The Graph Playground ```graphql @@ -199,4 +199,4 @@ If you want more experience with grafting, here are a few examples for popular c To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. 
Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results -> Nota: Gran parte del material de este artículo se ha extraído del artículo publicado anteriormente por [Arweave](/subgraphs/cookbook/arweave/) +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From c0867938d90e6f1431e3b5868e9af09aa02f43bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:01 -0500 Subject: [PATCH 0657/1534] New translations grafting.mdx (Arabic) --- website/src/pages/ar/subgraphs/cookbook/grafting.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/grafting.mdx b/website/src/pages/ar/subgraphs/cookbook/grafting.mdx index bfbe1c2a19d9..704e7df3f3f6 100644 --- a/website/src/pages/ar/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/grafting.mdx @@ -12,15 +12,15 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o - يضيف أو يزيل أنواع الكيانات - يزيل الصفات من أنواع الكيانات -- يضيف صفات nullable لأنواع الكيانات -- يحول صفات non-nullable إلى صفات nullable -- يضيف قيما إلى enums -- يضيف أو يزيل الواجهات +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces - يغير للكيانات التي يتم تنفيذ الواجهة لها For more information, you can check: -- [تطعيم(Grafting)](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. From f2cdf551fec36959d6a9c4cae85ed0c928d6fceb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:02 -0500 Subject: [PATCH 0658/1534] New translations grafting.mdx (Czech) --- .../pages/cs/subgraphs/cookbook/grafting.mdx | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/grafting.mdx b/website/src/pages/cs/subgraphs/cookbook/grafting.mdx index d18fe0be5fe5..ca0ab0367451 100644 --- a/website/src/pages/cs/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/grafting.mdx @@ -20,13 +20,13 @@ Při roubování se znovu použijí data z existujícího podgrafu a začne se i Další informace naleznete na: -- [Roubování](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. ## Důležité upozornění k roubování při aktualizaci na síť -> **Upozornění**: Doporučujeme nepoužívat roubování pro podgrafy publikované v síti grafů +> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network ### Proč je to důležité? @@ -34,23 +34,23 @@ In this tutorial, we will be covering a basic use case. 
We will replace an exist ### Osvědčené postupy -**Počáteční migrace**: při prvním nasazení podgrafu do decentralizované sítě tak učiňte bez roubování. Ujistěte se, že je podgraf stabilní a funguje podle očekávání. +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. -**Následující aktualizace**: jakmile je váš podgraf v decentralizované síti živý a stabilní, můžete použít roubování pro budoucí verze, aby byl přechod plynulejší a aby byla zachována historická data. +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. Dodržováním těchto pokynů minimalizujete rizika a zajistíte hladší průběh migrace. ## Vytvoření existujícího podgrafu -Vytváření podgrafů je důležitou součástí Grafu, která je podrobněji popsána [zde](/subgraphs/quick-start/). Aby bylo možné sestavit a nasadit existující podgraf použitý v tomto tutoriálu, je k dispozici následující repozitář: +Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: -- [Příklad repo subgrafu](https://github.com/Shiyasmohd/grafting-tutorial) +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Poznámka: Smlouva použitá v dílčím grafu byla převzata z následující [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Definice podgrafu Manifest -Manifest podgrafu `subgraph.yaml` identifikuje zdroje dat pro podgraf, zajímavé spouštěče a funkce, které by měly být spuštěny v reakci na tyto spouštěče. Níže naleznete příklad manifestu podgrafu, který budete používat: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -79,9 +79,9 @@ dataSources: file: ./src/lock.ts ``` -- Zdroj dat `Lock` je adresa abi a smlouvy, kterou získáme při kompilaci a nasazení smlouvy +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract - The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` -- Sekce `mapování` definuje spouštěče, které vás zajímají, a funkce, které by měly být spuštěny v reakci na tyto spouštěče. V tomto případě nasloucháme na událost `Výstup` a po jejím vyslání voláme funkci `obsluhovatVýstup`. +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. ## Definice manifestu roubování @@ -96,15 +96,15 @@ graft: block: 5956000 # block number ``` -- `funkce:` je seznam všech použitých [jmen funkcí](/developing/creating-a-subgraph/#experimental-features). -- `graft:` je mapa subgrafu `base` a bloku, na který se má roubovat. `block` je číslo bloku, od kterého začít indexovat. 
Graph zkopíruje data základního subgrafu až k zadanému bloku včetně, a poté pokračuje v indexaci nového subgrafu od tohoto bloku dále. +- `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. -Hodnoty `base` a `block` lze nalézt nasazením dvou podgrafů: jednoho pro základní indexování a druhého s roubováním +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting ## Nasazení základního podgrafu -1. Přejděte do [Podgraf Studio](https://thegraph.com/studio/) a vytvořte podgraf v testovací síti Sepolia s názvem `graft-example` -2. Následujte pokyny v části `AUTH & DEPLOY` na stránce vašeho subgrafu v adresáři `graft-example` ve vašem repozitáři +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` +2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo 3. Po dokončení ověřte, zda se podgraf správně indexuje. Pokud spustíte následující příkaz v The Graph Playground ```graphql @@ -144,9 +144,9 @@ Jakmile ověříte, že se podgraf správně indexuje, můžete jej rychle aktua Náhradní podgraf.yaml bude mít novou adresu smlouvy. K tomu může dojít při aktualizaci dapp, novém nasazení kontraktu atd. -1. Přejděte do [Podgraf Studio](https://thegraph.com/studio/) a vytvořte podgraf v testovací síti Sepolia s názvem `graft-replacement` -2. Vytvořte nový manifest. Soubor `subgraph.yaml` pro `graph-replacement` obsahuje jinou adresu kontraktu a nové informace o tom, jak by měl být podgraf nasazen. Tyto informace zahrnují `block` [poslední emitovanou událost](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) od starého kontraktu a `base` starého podgrafu. ID `base` podgrafu je `Deployment ID` vašeho původního `graph-example` subgrafu. To můžete najít v Podgraf Studiu. -3. Postupujte podle pokynů v části `AUTH & DEPLOY` na stránce podgrafu ve složce `graft-replacement` z repozitáře +1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` +2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. +3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo 4. Po dokončení ověřte, zda se podgraf správně indexuje. Pokud spustíte následující příkaz v The Graph Playground ```graphql @@ -185,7 +185,7 @@ Měla by vrátit následující: } ``` -Vidíte, že podgraf `graft-replacement` indexuje ze starších dat `graph-example` a novějších dat z nové adresy smlouvy. 
Původní smlouva emitovala dvě události `Odstoupení`, [Událost 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) a [Událost 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). Nová smlouva emitovala jednu událost `Výběr` poté, [Událost 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). Dvě dříve indexované transakce (Událost 1 a 2) a nová transakce (Událost 3) byly spojeny dohromady v podgrafu `výměna-odvod`. +You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. Gratulujeme! Úspěšně jste naroubovali podgraf na jiný podgraf. @@ -197,6 +197,6 @@ If you want more experience with grafting, here are a few examples for popular c - [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) - [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml), -Chcete-li se stát ještě větším odborníkem na graf, zvažte možnost seznámit se s dalšími způsoby zpracování změn v podkladových zdrojích dat. Alternativy jako [Šablony zdroje dat](/developing/creating-a-subgraph/#data-source-templates) mohou dosáhnout podobných výsledků +To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results -> Poznámka: Mnoho materiálů z tohoto článku bylo převzato z dříve publikovaného [článku Arweave](/subgraphs/cookbook/arweave/) +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From df3f9e5c4b4732edb714a227ecd18ea9f6cab9c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:03 -0500 Subject: [PATCH 0659/1534] New translations grafting.mdx (German) --- website/src/pages/de/subgraphs/cookbook/grafting.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/de/subgraphs/cookbook/grafting.mdx b/website/src/pages/de/subgraphs/cookbook/grafting.mdx index 593ab8880977..ee92710b3059 100644 --- a/website/src/pages/de/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/de/subgraphs/cookbook/grafting.mdx @@ -189,7 +189,7 @@ You can see that the `graft-replacement` subgraph is indexing from older `graph- Congrats! You have successfully grafted a subgraph onto another subgraph. 
-## Additional Resources +## Zusätzliche Ressourcen If you want more experience with grafting, here are a few examples for popular contracts: From fe505cbaf1190dbfa900885da32e3b86c71ba9f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:04 -0500 Subject: [PATCH 0660/1534] New translations grafting.mdx (Italian) --- .../pages/it/subgraphs/cookbook/grafting.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/it/subgraphs/cookbook/grafting.mdx b/website/src/pages/it/subgraphs/cookbook/grafting.mdx index 8ec621c2ace6..57d5169830a7 100644 --- a/website/src/pages/it/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/it/subgraphs/cookbook/grafting.mdx @@ -8,15 +8,15 @@ In this guide, you will learn how to build and deploy new subgraphs by grafting Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. -Il grafted subgraph può utilizzare uno schema GraphQL non identico a quello del subgraph di base, ma semplicemente compatibile con esso. Deve essere uno schema di subgraph valido di per sé, ma può discostarsi dallo schema del subgraph di base nei seguenti modi: - -- Aggiunge o rimuove i tipi di entità -- Rimuove gli attributi dai tipi di entità -- Aggiunge attributi annullabili ai tipi di entità -- Trasforma gli attributi non nulli in attributi nulli -- Aggiunge valori agli enum -- Aggiunge o rimuove le interfacce -- Cambia per quali tipi di entità viene implementata un'interfaccia +The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: + +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces +- It changes for which entity types an interface is implemented For more information, you can check: From 6edd51facce2bc573179a2cb9a14bf57a64f6559 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:05 -0500 Subject: [PATCH 0661/1534] New translations grafting.mdx (Japanese) --- .../pages/ja/subgraphs/cookbook/grafting.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/grafting.mdx b/website/src/pages/ja/subgraphs/cookbook/grafting.mdx index b1c02b9495f5..0be8b13c8dbd 100644 --- a/website/src/pages/ja/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/grafting.mdx @@ -34,9 +34,9 @@ Grafting is a powerful feature that allows you to "graft" one subgraph onto anot ### ベストプラクティス -**初期移行**: サブグラフを初めて分散ネットワークにデプロイするときは、移植せずに実行してください。 サブグラフが安定しており、期待どおりに機能していることを確認します。 +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. 
-**その後の更新**: サブグラフが分散ネットワーク上で稼働し、安定したら、移行をよりスムーズにし、履歴データを保存するために、将来のバージョンにグラフティングを使用できます。
+**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data.

これらのガイドラインに従うことで、リスクを最小限に抑え、よりスムーズな移行プロセスを確保できます。

## 既存のサブグラフを構築する

Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided:

-- [サブグラフ例文レポ](https://github.com/Shiyasmohd/grafting-tutorial)
+- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial)

-> 注:サブグラフで使用されているコントラクトは、以下の[Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit)から取得したものです。
+> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit).

## サブグラフマニフェストの定義

-サブグラフ マニフェスト `subgraph.yaml` は、サブグラフのデータ ソース、関心のあるトリガー、およびこれらのトリガーに応答して実行される関数を識別します。使用するサブグラフ マニフェストの例については、以下を参照してください。
+The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use:

```yaml
specVersion: 0.0.4
schema:
  file: ./schema.graphql
dataSources:
  - kind: ethereum
    name: Lock
    network: sepolia
    source:
      address: "0x4Ed995e775D3629b0566D2279f058729Ae6EA493"
      abi: Lock
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      entities:
        - Withdrawal
      abis:
        - name: Lock
          file: ./abis/Lock.json
      eventHandlers:
        - event: Withdrawal(uint256,uint256)
          handler: handleWithdrawal
      file: ./src/lock.ts
```

-- `Lock`データソースは、コンパイルとデプロイ時に取得するアビとコントラクトのアドレスです。
+- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract
- The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia`
-- `mapping`セクションでは、関心のあるトリガーと、それらのトリガーに応答して実行されるべき関数を定義しています。この場合、`Withdrawal`イベントをリスニングし、それが発信されたときに`handleWithdrawal`関数を呼び出すことにしています。
+- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted.

## グラフティングマニフェストの定義

グラフティングは、元のサブグラフマニフェストに2つの新しい項目を追加する必要があります:

```yaml
---
features:
  - grafting # feature name
graft:
  base: Qm... # subgraph ID of base subgraph
  block: 5956000 # block number
```

- `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features).
-- `graft:`は`base`サブグラフとグラフティングをするブロックのマップです。`block`はインデックスを開始するブロック番号です。Graphは、指定されたブロックまでのベースサブグラフのデータをコピーし、そのブロックから新しいサブグラフのインデックスを作成し続けます。
+- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on.

-`base`と`block`の値は、2つのサブグラフを展開することで求めることができます:一つはベースインデックス用、もう一つはグラフティング用です
+The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting

## ベースサブグラフの起動

1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example`
-2. レポの `graft-example` フォルダ内のサブグラフのページにある `AUTH & DEPLOY` セクションの指示にしたがってください。
+2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo
3. 終了後、サブグラフが正しくインデックスされていることを確認します。The Graph Playgroundで以下のコマンドを実行すると、サブグラフが正常にインデックスされます。

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

## グラフト置換サブグラフの展開

グラフト置換のsubgraph.yamlは、新しいコントラクトアドレスを持つことになります。これは、ダップをアップデートしたり、コントラクトを再デプロイしたりしたときに起こりうることです。

1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement`
2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio.
3. レポの `graft-replacement` フォルダ内のサブグラフのページにある `AUTH & DEPLOY` セクションの指示にしたがってください。
4. 終了後、サブグラフが正しくインデックスされていることを確認します。The Graph Playgroundで以下のコマンドを実行すると、サブグラフが正常にインデックスされます。

If you want more experience with grafting, here are a few examples for popular contracts:

-- [曲線](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml)
+- [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml)
- [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml)
- [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml),

To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results

-> 注:この記事の多くの資料は、以前公開された[アルウィーブの記事](/subgraphs/cookbook/arweave/)から引用したものです。
+> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/)

From 8cfa57d69cd1f9bee2c6d052148a0e366d6b9c1e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:56:08 -0500
Subject: [PATCH 0662/1534] New translations grafting.mdx (Portuguese)

---
 .../pages/pt/subgraphs/cookbook/grafting.mdx | 40 +++++++++----------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/website/src/pages/pt/subgraphs/cookbook/grafting.mdx b/website/src/pages/pt/subgraphs/cookbook/grafting.mdx
index 2110f465f014..cbfc42ddc895 100644
--- a/website/src/pages/pt/subgraphs/cookbook/grafting.mdx
+++ b/website/src/pages/pt/subgraphs/cookbook/grafting.mdx
@@ -26,7 +26,7 @@ Neste tutorial, cobriremos um caso de uso básico. Substituiremos um contrato ex

## Notas Importantes sobre Enxertos ao Migrar Para a Graph Network

-> **Cuidado:** Recomendamos fortemente não usar enxertos para subgraphs publicados na Graph Network
+> **Cuidado**: Não é recomendado usar enxertos para subgraphs editados na The Graph Network

### Qual a Importância Disto?

Isto é um recurso poderoso que permite que os programadores "enxertem" um subgraph em outro.

### Boas práticas

-**Migração Inicial**: na primeira publicação do seu subgraph à rede descentralizada, faça-o sem enxertos. Garanta que o subgraph está estável e que ele funciona como esperado.
+**Migração Inicial**: Ao implantar o seu subgraph pela primeira vez na rede descentralizada, faça-o sem enxertos. Verifique se o subgraph está estável e funciona como esperado.

-**Atualizações Subsequentes**: quando o seu subgraph estiver ao vivo e estável na rede descentralizada, use o enxerto em versões futuras para suavizar a transição e preservar dados históricos.
+**Atualizações Subsequentes**: quando o seu subgraph estiver ativo e estável na rede descentralizada, será possível usar enxertos para versões futuras, para tornar a transição mais suave e preservar dados históricos.

Ao aderir a estas diretrizes, dá para minimizar riscos e garantir um processo de migração mais suave.

## Como Construir um Subgraph Existente

-Construir subgraphs é essencial para o Graph; o processo é descrito em mais detalhes [aqui](/subgraphs/quick-start/). Para poder construir e lançar o subgraph existente usado neste tutorial, há o seguinte repo:
+Construir subgraphs é uma parte essencial do The Graph, descrita mais profundamente [aqui](/subgraphs/quick-start/). Para poder construir e implementar o subgraph existente usado neste tutorial, veja o seguinte repositório:

-- [Exemplo de repo de subgraph](https://github.com/Shiyasmohd/grafting-tutorial)
+- [Exemplo de repositório de subgraph](https://github.com/Shiyasmohd/grafting-tutorial)

-> Nota: O contrato usado no subgraph foi retirado do seguinte [Kit de Iniciante de Hackathon](https://github.com/schmidsi/hackathon-starterkit).
+> Nota: O contrato usado no subgraph foi tirado do seguinte [Kit para Iniciantes de Hackathon](https://github.com/schmidsi/hackathon-starterkit).

## Definição de Manifest de Subgraph

-O manifest de subgraph `subgraph.yaml` identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a tais gatilhos. Veja abaixo um exemplo de um manifest de subgraph para uso prático:
+O manifest do subgraph `subgraph.yaml` identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a esses gatilhos. Veja abaixo um exemplo de manifesto de subgraph para usar:

```yaml
specVersion: 0.0.4
schema:
  file: ./schema.graphql
dataSources:
  - kind: ethereum
    name: Lock
    network: sepolia
    source:
      address: "0x4Ed995e775D3629b0566D2279f058729Ae6EA493"
      abi: Lock
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      entities:
        - Withdrawal
      abis:
        - name: Lock
          file: ./abis/Lock.json
      eventHandlers:
        - event: Withdrawal(uint256,uint256)
          handler: handleWithdrawal
      file: ./src/lock.ts
```

-- A fonte de dados `Lock` é o abi e o endereço do contrato que receberemos ao compilar e lançar o contrato
+- A fonte de dados `Lock` consiste na ABI (Interface binária de aplicação), e o endereço de contrato que receberemos ao compilar e implantar o contrato
-- A rede deve corresponder a uma rede indexada que está a ser consultada em query. Como executamos na testnet Sepolia, a rede é `sepolia`
+- A rede deve corresponder a uma rede indexada a ser consultada em query. Como estamos a operar na testnet da Sepolia, a rede é `sepolia`
-- A seção `mapping` define os gatilhos de interesse e as funções que devem ser executadas em resposta àqueles gatilhos. Neste caso, esperamos o evento `Withdrawal` e chamaremos a função `handleWithdrawal` quando o evento for emitido.
+- A seção `mapping` define os gatilhos de interesse e as funções a executar em resposta a esses. Neste caso, estamos à espera do evento `Withdrawal`; quando emitido, chamaremos a função `handleWithdrawal`.

## Definição de Manifest de Enxertos

```yaml
---
features:
  - grafting # feature name
graft:
  base: Qm... # subgraph ID of base subgraph
  block: 5956000 # block number
```

-- `features`: uma lista de todos os nomes de [feature](#experimental-features) usados.
-- `graft:` um mapa do subgraph `base` e o bloco que o enxertará. O `block` é o número do bloco de onde a indexação começará. O Graph então copiará os dados do subgraph base, até e incluindo o bloco dado, e então continuará a indexar o novo subgraph a partir daquele bloco.
+- `features:` é uma lista de todos os [nomes de função](/developing/creating-a-subgraph/#experimental-features) usados.
+- `graft:` é um mapa do subgraph `base` e ​​do bloco para enxertar. `block` é o número do bloco para começar a indexação. The Graph copiará os dados do subgraph base até, e incluindo, o bloco fornecido, e então continuará a indexar o novo subgraph a partir desse bloco em diante.

-Os valores `base` e `block` podem ser encontrados ao lançar dois subgraphs: um para a indexação base e um com o enxerto
+Os valores `base` e ​​`block` podem ser encontrados com a implantação de dois subgraphs: um para indexação de base e outro com enxerto

## Como Lançar o Subgraph Base

-1. Vá para o [Subgraph Studio](https://thegraph.com/studio/) e crie um subgraph na testnet Goerli chamado `graft-example`
-2. Siga as direções na seção `AUTH & DEPLOY` na página do seu subgraph, na pasta `graft-example` do repo
+1. Vá para o [Subgraph Studio](https://thegraph.com/studio/) e crie um subgraph na testnet da Sepolia chamado `graft-example`
+2. Siga as direções na seção `AUTH & DEPLOY` na sua página de subgraph, na pasta `graft-example` do repositório
3. Ao terminar, verifique que o subgraph está a indexar corretamente. Se executar o seguinte comando no The Graph Playground

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

Após verificar que o subgraph está a indexar corretamente, será possível atualizá-lo rapidamente com enxertos.

O subgraph.yaml do substituto terá um novo endereço de contrato. Isto pode acontecer quando atualizar o seu dapp, relançar um contrato, etc.

-1. Vá para o [Subgraph Studio](https://thegraph.com/studio/) e crie um subgraph na testnet Sepolia chamado `graft-replacement`
-2. Crie um novo manifest. O `subgraph.yaml` para o `graph-replacement` contém um endereço de contrato diferente e novas informações sobre como ele deve enxertar. Estes são o `block` do [último evento emitido](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) de seu interesse pelo contrato antigo, e o `base` do subgraph antigo. O ID do subgraph `base` é o `Deployment ID` do seu subgraph `graph-example`. Isto está disponível no UI do Graph Studio.
-3. Siga as direções na seção `AUTH & DEPLOY` na página do seu subgraph, na pasta `graft-replacement` do repo
+1. Vá para o [Subgraph Studio](https://thegraph.com/studio/) e crie um subgraph na testnet da Sepolia chamado `graft-replacement`
+2. Crie um novo manifesto. O `subgraph.yaml` para `graph-replacement` contém um endereço de contrato diferente e novas informações sobre como ele deve enxertar. Estes são o `block` do [último evento importante emitido](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) pelo contrato antigo, e o `base` do subgraph antigo. A ID de subgraph `base` é a `Deployment ID` do seu subgraph `graph-example` original. Você pode encontrá-la no Subgraph Studio.
+3. Siga as instruções na seção `AUTH & DEPLOY` da sua página de subgraph, na pasta `graft-replacement` do repositório
4. Ao terminar, verifique que o subgraph está a indexar corretamente. Se executar o seguinte comando no The Graph Playground

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

Ele deve retornar algo como:

-É possível ver que o subgraph `graft-replacement` indexa a partir de dados mais antigos do `graph-example` e de dados mais novos do novo endereço de contrato. O contrato original emitiu dois eventos `Withdrawal`, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) e [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). O novo contrato emitiu um `Withdrawal` depois, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). As duas transações indexadas anteriormente (Event 1 e 2) e a nova transação (Event 3) foram combinadas no subgraph `graft-replacement`.
+Repare que o subgraph `graft-replacement` está a indexar a partir de dados `graph-example` mais antigos e dados mais novos do novo endereço de contrato. O contrato original emitiu dois eventos `Withdrawal`: [Evento 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) e [Evento 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). O novo contrato emitiu um `Withdrawal` após, [Evento 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). As duas transações indexadas anteriormente (Evento 1 e 2) e a nova transação (Evento 3) foram combinadas no subgraph `graft-replacement`.

Parabéns! Enxertaste um subgraph em outro subgraph.

Caso queira mais experiência com enxertos, veja alguns exemplos de contratos populares:

Para se tornar especialista no The Graph, aprenda sobre outras maneiras de cuidar de mudanças em fontes de dados subjacentes. Alternativas como [Modelos de Fontes de Dados](/developing/creating-a-subgraph/#data-source-templates) podem dar resultados parecidos

-> Nota: Grande parte do material deste artigo foi tirado do [artigo sobre o Arweave](/subgraphs/cookbook/arweave/)
+> Nota: Grande parte do material deste artigo foi tirado do [artigo anteriormente editado sobre o Arweave](/subgraphs/cookbook/arweave/)

From bcf323074e2f2e37d16b12ba14aef789a8e99837 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:56:09 -0500
Subject: [PATCH 0663/1534] New translations grafting.mdx (Russian)

---
 .../pages/ru/subgraphs/cookbook/grafting.mdx | 56 +++++++++----------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/website/src/pages/ru/subgraphs/cookbook/grafting.mdx b/website/src/pages/ru/subgraphs/cookbook/grafting.mdx
index 3b2308c83c0f..8605468ff4e7 100644
--- a/website/src/pages/ru/subgraphs/cookbook/grafting.mdx
+++ b/website/src/pages/ru/subgraphs/cookbook/grafting.mdx
@@ -20,37 +20,37 @@ title: Замените контракт и сохраните его истор

Для получения дополнительной информации Вы можете перейти:

-- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs)
+- [Графтинг](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs)

-In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract.
+В этом руководстве мы рассмотрим базовый случай использования. Мы заменим существующий контракт идентичным (с новым адресом, но с тем же кодом). Затем подключим существующий субграф к "базовому" субграфу, который отслеживает новый контракт.
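
The schema-compatibility rules repeated throughout these translated guides (see the Italian hunk above) are easy to illustrate. Below is a minimal GraphQL sketch of one permitted deviation — the entity and field names are hypothetical, not taken from the tutorial's actual schema:

```graphql
# Illustrative sketch only — not part of the tutorial's schema.
# Assume the base subgraph defines this entity with id, amount, and when.
type Withdrawal @entity {
  id: ID!
  amount: BigInt!
  when: BigInt!
  # A grafted subgraph may add a nullable attribute like this one
  # without breaking compatibility with the base subgraph's data:
  note: String
}
```

Making the added field nullable matters: rows copied from the base subgraph carry no value for it, so a non-nullable addition would make the copied data invalid.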
## Важное примечание о Grafting при обновлении до сети

-> **Внимание**: Не рекомендуется использовать Grafting для субграфов, опубликованных в The Graph Network
+> **Предупреждение**: Рекомендуется не использовать графтинг для субграфов, опубликованных в сети The Graph

### Почему это важно?

-Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio.
+Grafting — это мощная функция, которая позволяет «переносить» один субграф в другой, фактически перенося исторические данные из существующего субграфа в новую версию. Однако перенос субграфа из The Graph Network обратно в Subgraph Studio невозможен.

### Лучшие практики

-**Первоначальная миграция**: когда вы впервые развертываете свой субграф в децентрализованной сети, делайте это без Grafting. Убедитесь, что субграф стабилен и функционирует должным образом.
+**Первоначальная миграция**: при первом развертывании субграфа в децентрализованной сети рекомендуется не использовать графтинг. Убедитесь, что субграф стабилен и работает должным образом.

-**Последующие обновления**: как только Ваш субграф станет активным и стабильным в децентрализованной сети, Вы сможете использовать Grafting для будущих версий, чтобы сделать переход более плавным и сохранить исторические данные.
+**Последующие обновления**: когда Ваш субграф будет развернут и стабилен в децентрализованной сети, Вы можете использовать графтинг для будущих версий, чтобы облегчить переход и сохранить исторические данные.

Соблюдая эти рекомендации, Вы минимизируете риски и обеспечите более плавный процесс миграции.

## Создание существующего субграфа

-Создание субграфов — важная часть The Graph, более подробно описанная [здесь](/subgraphs/quick-start/). Чтобы иметь возможность создать и развернуть существующий субграф, используемый в этом руководстве, предоставляется следующий репозиторий:
+Создание субграфов — это важная часть работы с The Graph, более подробно описанная [здесь](/subgraphs/quick-start/). Для того чтобы иметь возможность создать и развернуть существующий субграф, используемый в этом руководстве, предоставлен следующий репозиторий:

-- [Репозиторий субграфа в качестве примера](https://github.com/Shiyasmohd/grafting-tutorial)
+- [Пример репозитория субграфа](https://github.com/Shiyasmohd/grafting-tutorial)

-> Примечание: контракт, использованный в субграфе, был взят из [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit).
+> Примечание: Контракт, используемый в субграфе, был взят из следующего [стартового набора Hackathon](https://github.com/schmidsi/hackathon-starterkit).

## Определение манифеста субграфа

-Манифест субграфа `subgraph.yaml` определяет источники данных для субграфа, релевантные триггеры и функции, которые должны выполняться в ответ на эти триггеры. Ниже приведен пример манифеста субграфа, который Вы будете использовать:
+Манифест субграфа `subgraph.yaml` определяет источники данных для субграфа, триггеры, которые представляют интерес, и функции, которые должны быть выполнены в ответ на эти триггеры. Ниже приведен пример манифеста субграфа, который Вы будете использовать:

```yaml
specVersion: 0.0.4
schema:
  file: ./schema.graphql
dataSources:
  - kind: ethereum
    name: Lock
    network: sepolia
    source:
      address: "0x4Ed995e775D3629b0566D2279f058729Ae6EA493"
      abi: Lock
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      entities:
        - Withdrawal
      abis:
        - name: Lock
          file: ./abis/Lock.json
      eventHandlers:
        - event: Withdrawal(uint256,uint256)
          handler: handleWithdrawal
      file: ./src/lock.ts
```

-- Источник данных `Lock` — это abi и адрес контракта, которые мы получим при компиляции и развертывании контракта
+- Источник данных `Lock` — это ABI и адрес контракта, которые мы получим при компиляции и развертывании контракта
-- The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia`
+- Сеть должна соответствовать индексируемой сети, к которой выполняется запрос. Поскольку мы работаем в тестнете Sepolia, сеть будет `sepolia`.
-- Раздел `mapping` определяет интересующие нас триггеры и функции, которые должны запускаться в ответ на эти триггеры. В этом случае мы уделяем внимание событию `Withdrawal` и вызываем функцию `handleWithdrawal` при его возникновении.
+- Раздел `mapping` определяет триггеры, которые представляют интерес, и функции, которые должны быть выполнены в ответ на эти триггеры. В данном случае мы слушаем событие `Withdrawal` и вызываем функцию `handleWithdrawal`, когда оно срабатывает.

## Определение Манифеста Grafting

Grafting требует добавления двух новых элементов в манифест исходного субграфа:

```yaml
---
features:
  - grafting # наименование функции
graft:
  base: Qm... # идентификатор субграфа базового субграфа
  block: 5956000 # номер блока
```

-- `features` — это список всех используемых [названий функций](/developing/creating-a-subgraph/#experimental-features).
-- `graft:` — это карта субграфа `base` и блока, к которому применяется графтинг (перенос). `block` — это номер блока, с которого начинается индексация. The Graph скопирует данные базового субграфа до указанного блока включительно, а затем продолжит индексацию нового субграфа, начиная с этого блока.
+- `features:` — это список всех используемых [имен функций](/developing/creating-a-subgraph/#experimental-features).
+- `graft:` — это отображение базового субграфа и блока, к которому применяется графтинг (перенос). `block` — это номер блока, с которого нужно начать индексирование. The Graph скопирует данные из базового субграфа до указанного блока включительно, а затем продолжит индексирование нового субграфа с этого блока.

-Значения `base` и `block` можно найти, развернув два субграфа: один для базовой индексации, а другой с графтингом (переносом)
+Значения `base` и `block` можно найти, развернув два субграфа: один для базового индексирования и один с графтингом

## Развертывание базового субграфа

-1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example`
-2. Следуйте инструкциям в `AUTH & DEPLOY` на странице субграфа в `graft-example` из репо
+1. Перейдите в [Subgraph Studio](https://thegraph.com/studio/) и создайте субграф в тестнете Sepolia с названием `graft-example`
+2. Следуйте инструкциям в разделе `AUTH & DEPLOY` на странице своего субграфа в папке `graft-example` репозитория
3. После завершения убедитесь, что субграф правильно индексируется. Если Вы запустите следующую команду в The Graph Playground

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

Замененный subgraph.yaml будет иметь новый адрес контракта. Это может произойти, когда Вы обновите свое децентрализованное приложение, повторно развернете контракт и т. д.

-1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement`
-2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio.
+1. Перейдите в [Subgraph Studio](https://thegraph.com/studio/) и создайте субграф в тестнете Sepolia с названием `graft-replacement`
+2. Создайте новый манифест. `subgraph.yaml` для `graph-replacement` содержит другой адрес контракта и новую информацию о том, как он должен быть присоединен. Это `block` [последнего сгенерированного события](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452), которое Вас интересует, вызванного старым контрактом, и `base` старого субграфа. Идентификатор субграфа `base` — это `Deployment ID` Вашего исходного субграфа `graph-example`. Вы можете найти его в Subgraph Studio.
-3. Следуйте инструкциям в `AUTH & DEPLOY` на странице субграфа в `graft-replacement` из репозитория
+3. Следуйте инструкциям в разделе `AUTH & DEPLOY` на странице своего субграфа в папке `graft-replacement` репозитория
4. После завершения убедитесь, что субграф правильно индексируется. Если Вы запустите следующую команду в The Graph Playground

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

-You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph.
+Вы можете увидеть, что субграф `graft-replacement` индексирует данные как из старого субграфа `graph-example`, так и из новых данных из нового адреса контракта. Исходный контракт сгенерировал два события `Withdrawal`, [Событие 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) и [Событие 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). Новый контракт сгенерировал одно событие `Withdrawal` после этого, [Событие 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). Две ранее индексируемые транзакции (События 1 и 2) и новая транзакция (Событие 3) были объединены в субграфе `graft-replacement`.

Поздравляем! Вы успешно перенесли один субграф в другой.
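
One way to sanity-check a graft like the one verified above is to query both sides of the graft block in a single request using GraphQL aliases. A sketch, assuming the tutorial's `withdrawals` field plus a hypothetical `blockNumber` attribute on the entity — the tutorial's own schema may expose different fields:

```graphql
{
  # Entities copied from the base subgraph (up to and including block 5956000)
  beforeGraft: withdrawals(first: 5, where: { blockNumber_lte: 5956000 }) {
    id
    amount
  }
  # Entities indexed by the grafted subgraph after the graft block
  afterGraft: withdrawals(first: 5, where: { blockNumber_gt: 5956000 }) {
    id
    amount
  }
}
```

If both aliases return rows, the graft picked up the historical data and is indexing new data past the graft point.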
## Дополнительные ресурсы

-If you want more experience with grafting, here are a few examples for popular contracts:
+Если Вы хотите получить больше опыта в графтинге (переносе), вот несколько примеров популярных контрактов:

- [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml)
- [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml)
- [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml),

-Чтобы стать еще большим экспертом по Graph, рассмотрите возможность узнать о других способах обработки изменений в базовых источниках данных. Такие альтернативы, как [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates), могут дать аналогичные результаты
+Чтобы стать еще большим экспертом в области Graph, рассмотрите возможность изучения других способов обработки изменений в исходных данных. Альтернативы, такие как [Шаблоны источников данных](/developing/creating-a-subgraph/#data-source-templates), могут привести к аналогичным результатам

-> Примечание: многие материалы этой статьи были взяты из ранее опубликованной [статьи по Arweave](/subgraphs/cookbook/arweave/)
+> Примечание: Многие материалы из этой статьи были взяты из ранее опубликованной статьи об [Arweave](/subgraphs/cookbook/arweave/)

From b4822c0f38105e31607dfc83dbd2021b6194f49b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:56:10 -0500
Subject: [PATCH 0664/1534] New translations grafting.mdx (Swedish)

---
 .../pages/sv/subgraphs/cookbook/grafting.mdx | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/src/pages/sv/subgraphs/cookbook/grafting.mdx b/website/src/pages/sv/subgraphs/cookbook/grafting.mdx
index a25fecf797f0..e43fd73014c3 100644
--- a/website/src/pages/sv/subgraphs/cookbook/grafting.mdx
+++ b/website/src/pages/sv/subgraphs/cookbook/grafting.mdx
@@ -20,7 +20,7 @@ Den ympade subgrafen kan använda ett GraphQL-schema som inte är identiskt med

För mer information kan du kontrollera:

-- [Ympning](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs)
+- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs)

In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract.

Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version.

### Bästa praxis

-**Inledande Migration**: När du först distribuerar din subgraph till det decentraliserade nätverket, gör det utan grafting. Se till att subgraphen är stabil och fungerar som förväntat.
+**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected.

-**Senare Uppdateringar**: När din subgraph är aktiv och stabil på det decentraliserade nätverket kan du använda grafting för framtida versioner för att göra övergången smidigare och bevara historisk data.
+**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data.
Genom att följa dessa riktlinjer minimerar du riskerna och säkerställer en smidigare migreringsprocess.

## Bygga en befintlig subgraf

Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided:

-- [Subgraf exempel repo](https://github.com/Shiyasmohd/grafting-tutorial)
+- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial)

-> Obs: Kontraktet som används i subgrafen togs från följande [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit).
+> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit).

## Definition av subgraf manifestet

-Subgrafmanifestet `subgraph.yaml` identifierar datakällorna för subgrafen, utlösare av intresse och funktionerna som ska köras som svar på dessa utlösare. Se nedan för ett exempel på subgraf manifest som du kommer att använda:
+The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use:

```yaml
specVersion: 0.0.4
schema:
  file: ./schema.graphql
dataSources:
  - kind: ethereum
    name: Lock
    network: sepolia
    source:
      address: "0x4Ed995e775D3629b0566D2279f058729Ae6EA493"
      abi: Lock
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      entities:
        - Withdrawal
      abis:
        - name: Lock
          file: ./abis/Lock.json
      eventHandlers:
        - event: Withdrawal(uint256,uint256)
          handler: handleWithdrawal
      file: ./src/lock.ts
```

-- `Lock`-datakällan är abi- och kontraktsadressen vi får när vi kompilerar och distribuerar kontraktet
+- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract
- The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia`
-- Avsnittet `mappning` definierar utlösare av intresse och de funktioner som ska köras som svar på dessa utlösare. I det här fallet lyssnar vi efter händelsen `Withdrawal` och anropar funktionen `handleWithdrawal` när den sänds.
+- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted.

## Ympnings manifest Definition

```yaml
---
features:
  - grafting # feature name
graft:
  base: Qm... # subgraph ID of base subgraph
  block: 5956000 # block number
```

- `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features).
-- `graft:` är en karta över subgrafen `base` och blocket att ympa på. `block` är blocknumret att börja indexera från. Grafen kopierar data från bas subgrafen till och med det givna blocket och fortsätter sedan att indexera den nya subgrafen från och med det blocket.
+- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on.

-Värdena `base` och `block` kan hittas genom att distribuera två subgrafer: en för basindexering och en med ympning
+The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting

## Distribuera Bas Subgraf

1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example`
-2. Följ anvisningarna i `AUTH & Sektionen DEPLOY` på din subgraf sida i mappen `graft-example` från repo
+2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo
3. När du är klar kontrollerar du att subgrafen indexerar korrekt. Om du kör följande kommando i The Graph Playground

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

Transplantatersättningen subgraph.yaml kommer att ha en ny kontraktsadress. Detta kan hända när du uppdaterar din dapp, omdisponerar ett kontrakt, etc.

1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement`
2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio.
-3. Följ anvisningarna i `AUTH & DEPLOY`-avsnittet på din subgraf sida i mappen `graft-replacement` från repo
-4. När du är klar kontrollerar du att subgrafen indexerar korrekt. Om du kör följande kommando i The Graph Lekplats
+3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo
+4. När du är klar kontrollerar du att subgrafen indexerar korrekt. Om du kör följande kommando i The Graph Playground

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

If you want more experience with grafting, here are a few examples for popular contracts:

To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results

-> Obs! Mycket material från den här artikeln togs från den tidigare publicerade [Arweave-artikeln](/subgraphs/cookbook/arweave/)
+> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/)

From b6dd8c483286d28f2aec4d1a23bf0cb233d6ee7e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:56:11 -0500
Subject: [PATCH 0665/1534] New translations grafting.mdx (Turkish)

---
 .../pages/tr/subgraphs/cookbook/grafting.mdx | 56 +++++++++----------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/cookbook/grafting.mdx b/website/src/pages/tr/subgraphs/cookbook/grafting.mdx
index cf4c592ce0b9..60855aa97729 100644
--- a/website/src/pages/tr/subgraphs/cookbook/grafting.mdx
+++ b/website/src/pages/tr/subgraphs/cookbook/grafting.mdx
@@ -20,37 +20,37 @@ Graftlanan subgraph, temel subgraphla tamamen aynı olmayan, ancak onunla uyumlu

Daha fazla bilgi için kontrol edebilirsiniz:

-- [Graftlama](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs)
+- [Aşılama](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs)

-In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract.
+Bu eğitimde, temel bir kullanım senaryosunu ele alacağız. Mevcut bir sözleşmeyi özdeş bir sözleşme (aynı koda sahip ancak adresi farklı bir sözleşme) ile değiştireceğiz. Ardından, mevcut subgraph'i yeni sözleşmeyi izleyen "temel" subgraph'e aşılayacağız.

## Ağa Yükseltme Durumunda Graftlamaya İlişkin Önemli Not

-> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network
+> **Dikkat**: Aşılamanın The Graph Ağına yayımlanan subgraph'ler için kullanılmaması önerilir

### Bu Neden Önemli?

-Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio.
+Aşılama, bir subgraph'i diğerine "aşılayarak" mevcut subgraph'ten yeni bir versiyona tarihi verileri etkin bir şekilde aktarmanıza olanak tanıyan güçlü bir özelliktir. Bir subgraph'i The Graph Ağından Subgraph Studio'ya geri aşılamak mümkün değildir.

### En İyi Uygulamalar

-**İlk Taşıma**: Subgraph'ınızı merkeziyetsiz ağa ilk kez dağıttığınızda, bunu graftlama yapmaksızın gerçekleştirin. Subgraph'ın kararlı olduğundan ve beklendiği gibi çalıştığından emin olun.
+**İlk Geçiş**: Subgraph'inizi ilk kez merkeziyetsiz ağa dağıttığınızda aşılama yapmayın. Subgraph'in stabil ve beklendiği gibi çalıştığından emin olun.

-**Sonraki Güncellemeler**: Subgraph'ınız merkeziyetsiz ağda yayında ve kararlı olduğunda, geçişi daha sorunsuz hale getirmek ve geçmiş verileri korumak adına gelecek sürümler için graftlamayı kullanabilirsiniz.
+**Sonraki Güncellemeler**: Subgraph'iniz merkeziyetsiz ağda canlı ve stabil olduğunda, gelecekteki versiyonlar için aşılama kullanarak geçişi daha sorunsuz hale getirebilir ve tarihi verileri koruyabilirsiniz.

Bu yönergelere uyarak riskleri en aza indirebilir ve daha sorunsuz bir taşıma süreci geçirebilirsiniz.

## Mevcut Bir Subgraph'ı Oluşturma

-Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided:
+Subgraph oluşturmak, The Graph'in önemli bir parçasıdır. Bu konu daha detaylı olarak [burada](/subgraphs/quick-start/) açıklanmıştır. Bu eğitimde kullanılan mevcut subgraph'i inşa etmek ve dağıtmak için aşağıdaki depo sağlanmıştır:

- [Subgraph örnek deposu](https://github.com/Shiyasmohd/grafting-tutorial)

-> Not: Subgraph'ta kullanılan sözleşme aşağıdaki [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit)'den alınmıştır.
+> Not: Subgraph'te kullanılan sözleşme, [Hackathon Başlangıç Kiti](https://github.com/schmidsi/hackathon-starterkit)'nden alınmıştır.

## Subgraph Manifest Tanımı

-Subgraph manifesti `subgraph.yaml`, subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Kullanacağınız örnek bir subgraph bildirimi için aşağıya bakın:
+Subgraph manifestosu `subgraph.yaml`, subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Kullanacağınız bir subgraph manifestosu örneği aşağıda verilmiştir:

```yaml
specVersion: 0.0.4
schema:
  file: ./schema.graphql
dataSources:
  - kind: ethereum
    name: Lock
    network: sepolia
    source:
      address: "0x4Ed995e775D3629b0566D2279f058729Ae6EA493"
      abi: Lock
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      entities:
        - Withdrawal
      abis:
        - name: Lock
          file: ./abis/Lock.json
      eventHandlers:
        - event: Withdrawal(uint256,uint256)
          handler: handleWithdrawal
      file: ./src/lock.ts
```

-- `Lock` veri kaynağı, sözleşmeyi derleyip dağıttığımızda alacağımız abi ve sözleşme adresidir
+- `Lock` veri kaynağı, sözleşmeyi derleyip dağıttığımızda elde edeceğimiz "abi" ve sözleşme adresidir
-- The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia`
+- Ağ, sorgulanan endekslenmiş bir ağa karşılık gelmelidir. Sepolia testnet üzerinde çalıştığımız için, ağ `sepolia`'dır
-- `mapping` bölümü, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Bu durumda, `Withdrawal` olayının etkinliklerini gözlemliyoruz ve yayıldığında `handleWithdrawal` fonksiyonunu çağırıyoruz.
+- `mapping` bölümü, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Bu durumda, `Withdrawal` olayını dinliyoruz ve yayarken `handleWithdrawal` fonksiyonunu çağırıyoruz.

## Graftlama Manifest Tanımı

Graftlama, orijinal subgraph bildirimine iki yeni öğe eklemeyi gerektirir:

```yaml
---
features:
  - grafting # özellik adı
graft:
  base: Qm... # Asıl subgraph'in kimlik numarası
  block: 5956000 # blok numarası
```

-- `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features).
-- `graft:` `base` subgraph'ın ve üzerine graftlanacak bloğun bir eşleştirmesidir. `block`, indekslemeye başlanacak blok numarasıdır. Graph, temel subgraph'ın verilerini verilen bloğa kadar ve bu blok dahil olmak üzere kopyalayacak ve ardından yeni subgraph'ı bu bloktan itibaren indekslemeye devam edecektir.
+- `features:` tüm kullanılan [özellik adlarının](/developing/creating-a-subgraph/#experimental-features) bir listesidir.
+- `graft:` `base` subgraph ve üzerine bağlanılacak bloktan oluşan bir eşlemedir. `block`, endekslemenin başlanacağı blok numarasıdır. The Graph, belirtilen bloka kadar olan temel subgraph'in verisini kopyalayıp bu bloka kadar olan kısmı dahil edecek ve ardından yeni subgraph'i bu bloktan itibaren endekslemeye devam edecek.

-`base` ve `block` değerler iki subgraph kullanılarak bulunabilir: biri temel indeksleme için ve diğeri graftlamalı
+`base` ve `block` değerleri, iki subgraph dağıtılarak bulunabilir: Biri temel endeksleme için, diğeri ise aşılama için olan subgraph

## Temel Subgraph'ı Dağıtma

-1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example`
-2. Depoda bulunan `graft-example` klasöründeki subgraph sayfanızda bulunan `AUTH & DEPLOY` bölümündeki talimatları izleyin
+1. [Subgraph Studio](https://thegraph.com/studio/) adresine gidip Sepolia testnet üzerinde `graft-example` adlı bir subgraph oluşturun
+2. Depodan `graft-example` klasöründeki `AUTH & DEPLOY` bölümündeki yönergeleri izleyin
3. Tamamlandığında, subgraph'ın doğru bir şekilde indekslendiğinden emin olun. Eğer aşağıdaki komutu Graph Test Alanında(Playground) çalıştırırsanız

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

Subgraph'ın düzgün bir şekilde indekslendiğini doğruladıktan sonra, subgraph'ı graftlama ile hızlı bir şekilde güncelleyebilirsiniz.

Graft yerine geçen subgraph.yaml yeni bir sözleşme adresine sahip olacaktır. Bu, merkeziyetsiz uygulamanızı güncellediğinizde, bir sözleşmeyi yeniden dağıttığınızda vb. gerçekleşebilir.

-1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement`
-2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio.
-3. Github deposunda bulunan `graft-replacement` klasöründeki subgraph sayfanızda bulunan `AUTH & DEPLOY` bölümündeki talimatları izleyin
+1. [Subgraph Studio](https://thegraph.com/studio/) adresine gidin ve Sepolia testnet üzerinde `graft-replacement` adlı bir subgraph oluşturun
+2. Yeni bir manifesto dosyası oluşturun. `graph-replacement` subgraph'ine ait `subgraph.yaml` dosyası, farklı bir sözleşme adresi ve nasıl aşılanması gerektiğiyle ilgili yeni bilgiler içermektedir. Bunlar, eski sözleşme tarafından ilgilendiğiniz [son yayılan olayın](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) `blok`u ve eski subgraph'in `base`'idir (temelidir). `base` subgraph kimliği, orijinal `graph-example` subgraph'inizin `Deployment ID`'sidir (dağıtım kimliğidir). Bunu Subgraph Studio'da bulabilirsiniz.
+3. `graft-replacement` klasöründeki subgraph sayfanızda, `AUTH & DEPLOY` bölümündeki talimatları izleyin
4. Tamamlandığında, subgraph'ın doğru bir şekilde indekslendiğinden emin olun. Eğer aşağıdaki komutu Graph Test Alanında(Playground) çalıştırırsanız

```graphql
{
  withdrawals(first: 5, orderBy: id) {
    id
    amount
    when
  }
}
```

Aşağıdakileri döndürmelidir:

-You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph.
+`graft-replacement` subgraph'inin eski `graph-example` verilerini ve yeni sözleşme adresinden gelen yeni verileri endekslediğini görebilirsiniz. Orijinal sözleşme, iki `Withdrawal` olayı yaydı: [Olay 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) ve [Olay 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). Yeni sözleşme, sonrasında bir `Withdrawal` olayı yaydı, [Olay 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). Önceden endekslenmiş iki işlem (Olay 1 ve 2) ve yeni işlem (Olay 3), `graft-replacement` subgraph'inde birleştirildi.

-Congrats! You have successfully grafted a subgraph onto another subgraph.
+Tebrikler! Bir subgraph'i başka bir subgraph'e başarıyla aşıladınız.
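
To see the merged history end-to-end after a graft like the one above, one can also sort newest-first. This sketch assumes the tutorial's `when` timestamp field; if so, the top row should be the withdrawal emitted by the replacement contract (Event 3), followed by the two grafted-in events:

```graphql
{
  withdrawals(first: 3, orderBy: when, orderDirection: desc) {
    id
    amount
    when
  }
}
```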
## Ek Kaynaklar -If you want more experience with grafting, here are a few examples for popular contracts: +Aşılama konusunda daha fazla deneyim kazanmak istiyorsanız, yaygın kullanılan sözleşmeler için aşağıda birkaç örnek bulunmaktadır: - [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml) - [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) - [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml), -To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results +Daha da iyi bir Graph uzmanı olmak için, temel veri kaynaklarındaki değişikliklerle başa çıkmanın diğer yollarını öğrenmeyi değerlendirin. [Veri Kaynağı Şablonları](/developing/creating-a-subgraph/#data-source-templates) gibi alternatifler benzer sonuçlar elde edebilir -> Not: Bu makaledeki birçok materyal daha önce yayınlanan [Arweave makalesinden](/subgraphs/cookbook/arweave/) alınmıştır +> Not: Bu makaledeki materyalin büyük bir kısmı, daha önce yayımlanmış olan [Arweave makalesinden](/subgraphs/cookbook/arweave/) alınmıştır From 8e9afb286f28f0dd65512b00451b1eccf62adccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:12 -0500 Subject: [PATCH 0666/1534] New translations grafting.mdx (Ukrainian) --- .../pages/uk/subgraphs/cookbook/grafting.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/grafting.mdx b/website/src/pages/uk/subgraphs/cookbook/grafting.mdx index ea87f4d73c5e..5455042183df 100644 --- a/website/src/pages/uk/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/grafting.mdx @@ -34,9 +34,9 @@ Grafting is a powerful feature that allows you to "graft" one subgraph onto anot ### Найкращі практики -**Початкова міграція**: коли ви вперше розгортаєте підграф у децентралізованій мережі, робіть це без графтингу. Переконайтеся, що підграф стабільний і функціонує належним чином. +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. -**Подальші оновлення**: після того, як ваш підграф буде запущено і стабільно працюватиме у децентралізованій мережі, ви можете використовувати графтинг для майбутніх версій, щоб зробити перехід більш плавним і зберегти історичні дані. +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. Дотримуючись цих рекомендацій, ви мінімізуєте ризики та забезпечите безперешкодний процес міграції. @@ -44,13 +44,13 @@ Grafting is a powerful feature that allows you to "graft" one subgraph onto anot Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). 
To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: -- [Репозиторій з прикладом та відповідним підграфом](https://github.com/Shiyasmohd/grafting-tutorial) +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> Примітка: Контракт, що використаний у підграфі, був взятий з [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## Визначення маніфесту підграфів -Маніфест підграфів `subgraph.yaml` визначає джерела даних для підграфа, тригери, що нас цікавлять, та функції, які слід запускати у відповідь на ці тригери. Нижче наведено приклад маніфесту підграфів, який ви будете використовувати: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -79,9 +79,9 @@ dataSources: file: ./src/lock.ts ``` -- Джерелом даних `Lock` є адреса abi та адреса контракту, яку ми отримаємо під час компіляції та розгортання контракту +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract - The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` -- Розділ `mapping` визначає тригери, що нас цікавлять, і функції, які мають бути запущені у відповідь на ці тригери. У цьому випадку ми очікуємо на `Withdrawal` і після цього викликаємо функцію `handleWithdrawal` коли вона з'являється. +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. ## Визначення Grafting Manifest @@ -97,14 +97,14 @@ graft: ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` це схема підграфа `base` і блоку, з яким відбудеться графтинг. `block` - це номер блоку, з якого починається індексування. The Graph скопіює дані базового підграфа до заданого блоку включно, а потім продовжить індексування нового підграфа, починаючи з цього блоку. +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. -`base` і `block` можна знайти, розгорнувши два підграфа: один для базової індексації та один для графтингу +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting ## Розгортання базового підграфа 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Дотримуйтесь інструкцій у розділах `AUTH & DEPLOY` на сторінці вашого підграфа в папці `graft-example` з репозиторію +2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo 3. Закінчивши, перевірте, чи правильно індексується підграф. Ви можете зробити це запустивши наступну команду у вікні The Graph Playground ```graphql @@ -146,7 +146,7 @@ graft: 1. 
Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` 2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Дотримуйтесь інструкцій у розділах `AUTH & DEPLOY` на вашій сторінці підграфа в папці `graft-replacement` з репозиторію +3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo 4. Закінчивши, перевірте, чи правильно індексується підграф. Ви можете зробити це запустивши наступну команду у вікні The Graph Playground ```graphql @@ -199,4 +199,4 @@ If you want more experience with grafting, here are a few examples for popular c To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results -> Примітка: Багато матеріалів для цієї статті було взято з раніше опублікованої [ статті від Arweave](/subgraphs/cookbook/arweave/) +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From cc4081703bf4009038206d7c127cd3271f13e1e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:13 -0500 Subject: [PATCH 0667/1534] New translations grafting.mdx (Chinese Simplified) --- .../pages/zh/subgraphs/cookbook/grafting.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/grafting.mdx b/website/src/pages/zh/subgraphs/cookbook/grafting.mdx index c741d57e8787..321b5b115bec 100644 --- a/website/src/pages/zh/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/grafting.mdx @@ -20,7 +20,7 @@ title: 用嫁接替换合约并保持合约的历史 有关详情,请参阅: -- [嫁接](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. @@ -40,17 +40,17 @@ Grafting is a powerful feature that allows you to "graft" one subgraph onto anot By adhering to these guidelines, you minimize risks and ensure a smoother migration process. -## 构建现有子图 +## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). 
To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: -- [子图示例存储库](https://github.com/Shiyasmohd/grafting-tutorial) +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> 注意: 子图中使用的合约取自以下[Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit)。 +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## 子图清单定义 -子图诠释了`subgraph.yaml`标识子图的数据源、感兴趣的触发器以及应该响应这些触发器而运行的函数。下面是您将使用的子图清单示例: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -79,9 +79,9 @@ dataSources: file: ./src/lock.ts ``` -- `Lock`数据源是我们在编译和部署合约时获得的abi和合约地址 +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract - The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` -- `mapping`部分定义了感兴趣的触发器以及应该响应这些触发器而运行的函数。在这种情况下,我们正在监听`Withdrawl`事件,并在发出该事件时调用`处理Withdrawal`函数。 +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. ## 嫁接清单定义 @@ -97,14 +97,14 @@ graft: ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft`:是`base`子图和要嫁接到的模块的映射。`block`是开始索引的区块号。Graph将把基本子图的数据复制到给定的区块并将其包括在内,然后从该区块开始继续索引新的子图。 +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. -通过部署两个子图可以找到`base`和`block`值:一个用于基索引,一个用于嫁接 +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting ## 部署基子图 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. 按照存储库中`graft-example`文件夹中子图页面的 `AUTH& DEPLOY `部分中的说明操作 +2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo 3. 完成后,验证子图是否正确索引。如果在Graph Playground中运行下列指令。 ```graphql @@ -117,7 +117,7 @@ graft: } ``` -它返回的结果是这样的: +It returns something like this: ``` { @@ -146,7 +146,7 @@ graft: 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` 2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. 按照存储库中的`graft-replacement`文件夹中子图页面上的 `AUTH& DEPLOY `部分的说明操作。 +3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo 4. 
完成后,验证子图是否正确索引。如果在Graph Playground中运行下列指令。 ```graphql @@ -199,4 +199,4 @@ If you want more experience with grafting, here are a few examples for popular c To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results -> 注意:这篇文章中的很多内容都来自之前发表的[Arweave文章](/subgraphs/cookbook/arweave/)。 +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From eb885e7bea73ccb215c239e27366997ae79f079e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:14 -0500 Subject: [PATCH 0668/1534] New translations grafting.mdx (Urdu (Pakistan)) --- .../pages/ur/subgraphs/cookbook/grafting.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/grafting.mdx b/website/src/pages/ur/subgraphs/cookbook/grafting.mdx index a03c1ee8d4e9..6cda9bfe1f6e 100644 --- a/website/src/pages/ur/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/grafting.mdx @@ -20,7 +20,7 @@ title: ایک کنٹریکٹ کو تبدیل کریں اور اس کی تاری مزید معلومات کے لۓ، آپ دیکہ سکتے ہیں: -- [گرافٹنگ](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. @@ -34,9 +34,9 @@ Grafting is a powerful feature that allows you to "graft" one subgraph onto anot ### بہترین طریقے -**ابتدائی منتقلی**: جب آپ پہلی بار اپنے سب گراف کو ڈیسنٹرالا ئزڈ نیٹ ورک پر تعینات کرتے ہیں، تو بغیر گرافٹنگ کے ایسا کریں۔ یقینی بنائیں کہ سب گراف مستحکم ہے اور توقع کے مطابق کام کر رہا ہے. +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. -**بعد کے اپ ڈیٹس**: ایک بار جب آپ کا سب گراف ڈیسینٹرلائزڈ نیٹ ورک پر لائیو اور مستحکم ہو جاتا ہے، تو آپ منتقلی کو ہموار بنانے اور تاریخی ڈیٹا کو محفوظ رکھنے کے لیے مستقبل کے ورژنز کے لیے گرافٹنگ کا استعمال کر سکتے ہیں. +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. ان رہنما خطوط پر عمل پیرا ہو کر، آپ خطرات کو کم کرتے ہیں اور منتقلی کے ایک ہموار عمل کو یقینی بناتے ہیں. @@ -44,13 +44,13 @@ Grafting is a powerful feature that allows you to "graft" one subgraph onto anot Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: -- [سب گراف مثلی ریپو](https://github.com/Shiyasmohd/grafting-tutorial) +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> نوٹ: سب گراف میں استعمال ہونے والا کنٹریکٹ درج ذیل [ہیکاتھون سٹارٹر کٹ](https://github.com/schmidsi/hackathon-starterkit) سے لیا گیا تھا. +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). 
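Before grafting onto a base subgraph, the guidance above asks you to confirm that the base is stable and functioning as expected. One quick way to check this, sketched here as a hedged example rather than a step from the tutorial itself, is to query the standard `_meta` field of The Graph's GraphQL API:

```graphql
{
  _meta {
    hasIndexingErrors
    block {
      number
    }
  }
}
```

If `hasIndexingErrors` is `false` and the head block number keeps advancing between runs, the deployment is healthy; that head block is also a natural upper bound when choosing the `block` value for a graft manifest.
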
## سب گراف مینی فیسٹ کی تعریف -سب گراف مینی فیسٹ `subgraph.yaml` سب گراف کے لیے ڈیٹا کے ذرائع، دلچسپی کے محرکات، اور ان افعال کی نشاندہی کرتا ہے جو ان محرکات کے جواب میں چلائے جانے چاہئیں۔ ذیل میں ایک سب گراف مینی فیسٹ کی مثال دیکھیں جو آپ استعمال کریں گے: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -79,9 +79,9 @@ dataSources: file: ./src/lock.ts ``` -- `Lock` ڈیٹا کا ذریعہ abi اور کنٹریکٹ ایڈریس ہے جب ہم کنٹریکٹ کو مرتب اور تعینات کریں گے +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract - The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` -- `mapping` سیکشن دلچسپی کے محرکات اور ان افعال کی وضاحت کرتا ہے جنہیں ان محرکات کے جواب میں چلایا جانا چاہیے۔ اس صورت میں، ہم `Withdrawal` ایونٹ کو سن رہے ہیں اور جب یہ خارج ہوتا ہے تو `handleWithdrawal` فنکشن کو کال کر رہے ہیں. +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. ## گرافٹنگ مینی فیسٹ کی تعریف @@ -97,14 +97,14 @@ graft: ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` `base` سب گراف کا نقشہ ہے اور اس پر گرافٹ کرنے کے لیے بلاک ہے۔ `block` وہ بلاک نمبر ہے جس سے انڈیکس کرنا شروع کیا جائے۔ گراف بیس سب گراف کے ڈیٹا کو دیے گئے بلاک تک اور اس سمیت کاپی کرے گا اور پھر اس بلاک سے نئے سب گراف کو انڈیکس کرنا جاری رکھے گا. +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. -`base` اور `block` اقدار کو دو سب گراف لگا کر تلاش کیا جا سکتا ہے: ایک بیس انڈیکسنگ کے لیے اور ایک گرافٹنگ کے ساتھ ہے +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting ## بیس سب گراف تعینات کرنا 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. `AUTH & DEPLOY` سیکشن میں `graft-example` فولڈر میں اپنے سب گراف صفحہ پر دی گئی ہدایات پر عمل کریں ریپو سے +2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo 3. ایک دفعہ ختم ہو جاۓ، تصدیق کریں کے سب گراف صحیح سے انڈیکس ہو رہا ہے. اگر آپ درج کمانڈ گراف پلے گراونڈ میں چلائیں ```graphql @@ -146,7 +146,7 @@ graft: 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` 2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. 
`AUTH & DEPLOY` سیکشن میں `graft-example` فولڈر میں اپنے سب گراف صفحہ پر دی گئی ہدایات پر عمل کریں ریپو سے +3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo 4. ایک دفعہ ختم ہو جاۓ، تصدیق کریں کے سب گراف صحیح سے انڈیکس ہو رہا ہے. اگر آپ درج کمانڈ گراف پلے گراونڈ میں چلائیں ```graphql @@ -199,4 +199,4 @@ If you want more experience with grafting, here are a few examples for popular c To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results -> نوٹ: اس مضمون کا بہت سا مواد پہلے شائع شدہ [آرویو آرٹیکل](/subgraphs/cookbook/arweave/) سے لیا گیا ہے +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From 967001850846f1ea2217fbc44e729acaa026f286 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:15 -0500 Subject: [PATCH 0669/1534] New translations grafting.mdx (Vietnamese) --- .../src/pages/vi/subgraphs/cookbook/grafting.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/grafting.mdx b/website/src/pages/vi/subgraphs/cookbook/grafting.mdx index ac324b79cad8..2d9b2a16a1ef 100644 --- a/website/src/pages/vi/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/grafting.mdx @@ -10,17 +10,17 @@ Grafting reuses the data from an existing subgraph and starts indexing it at a l The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: -- Nó thêm hoặc xóa các loại thực thể -- Nó loại bỏ các thuộc tính khỏi các loại thực thể -- Nó thêm các thuộc tính nullable vào các loại thực thể -- Nó biến các thuộc tính không thể nullable thành các thuộc tính nullable -- Nó thêm giá trị vào enums -- Nó thêm hoặc xóa các giao diện +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces - Nó thay đổi đối với loại thực thể nào mà một giao diện được triển khai For more information, you can check: -- [Ghép](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. 
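To make the schema-compatibility rules listed above concrete, here is a minimal, hedged sketch of a base schema and a grafted schema that differs from it only in a way grafting permits. The entity and field names are illustrative (loosely modeled on the tutorial's `Withdrawal` entity), not taken from the example repo.

```graphql
# Base subgraph schema (illustrative)
type Withdrawal @entity {
  id: ID!
  amount: BigInt!
  when: BigInt!
}
```

```graphql
# Grafted subgraph schema: still compatible, because the only
# deviation is a new nullable attribute on an existing entity type
type Withdrawal @entity {
  id: ID!
  amount: BigInt!
  when: BigInt!
  fee: BigInt # nullable, so grafting allows adding it
}
```

Adding `fee` as a non-nullable `BigInt!`, by contrast, is not among the allowed deviations; removing an attribute or relaxing a non-nullable one to nullable would be.
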
From f7779209582377ad78c0dbecdace17d16ec4a181 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:16 -0500 Subject: [PATCH 0670/1534] New translations grafting.mdx (Marathi) --- .../pages/mr/subgraphs/cookbook/grafting.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/grafting.mdx b/website/src/pages/mr/subgraphs/cookbook/grafting.mdx index f1e1538b93dc..3ceb7d2c7901 100644 --- a/website/src/pages/mr/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/grafting.mdx @@ -20,7 +20,7 @@ title: करार बदला आणि त्याचा इतिहास अधिक माहितीसाठी, तुम्ही तपासू शकता: -- [कलम करणे](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. @@ -44,13 +44,13 @@ By adhering to these guidelines, you minimize risks and ensure a smoother migrat Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: -- [सबग्राफ उदाहरण रेपो](https://github.com/Shiyasmohd/grafting-tutorial) +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> टीप: सबग्राफमध्ये वापरलेला करार खालील [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit) वरून घेतला होता. +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## सबग्राफ मॅनिफेस्ट व्याख्या -सबग्राफ मॅनिफेस्ट `subgraph.yaml` सबग्राफसाठी डेटा स्रोत, स्वारस्य ट्रिगर आणि त्या ट्रिगर्सना प्रतिसाद म्हणून चालवल्या जाणार्‍या फंक्शन्स ओळखतो. तुम्ही वापरणार असलेल्या सबग्राफ मॅनिफेस्टच्या उदाहरणासाठी खाली पहा: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -81,7 +81,7 @@ dataSources: - The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract - The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` -- `मॅपिंग` विभाग स्वारस्यांचे ट्रिगर आणि त्या ट्रिगरला प्रतिसाद म्हणून चालवल्या जाणार्‍या कार्ये परिभाषित करतो. या प्रकरणात, आम्ही `Withdrawal` इव्हेंट ऐकत आहोत आणि जेव्हा ते उत्सर्जित होते तेव्हा `handleWithdrawal` फंक्शनला कॉल करत आहोत. +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. ## Grafting मॅनिफेस्ट व्याख्या @@ -97,9 +97,9 @@ graft: ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `ग्राफ्ट:` हा `बेस` सबग्राफचा नकाशा आणि ग्राफ्ट ऑन ब्लॉक आहे. `block` हा ब्लॉक नंबर आहे ज्यावरून अनुक्रमणिका सुरू करायची आहे. 
आलेख बेस सबग्राफचा डेटा कॉपी करेल आणि दिलेल्या ब्लॉकपर्यंत आणि नंतर त्या ब्लॉकमधून नवीन सबग्राफ अनुक्रमित करणे सुरू ठेवेल. +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. -`बेस` आणि `ब्लॉक` व्हॅल्यू दोन सबग्राफ तैनात करून शोधता येतात: एक बेस इंडेक्सिंगसाठी आणि एक ग्राफ्टिंगसह +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting ## बेस सबग्राफ तैनात करणे @@ -193,10 +193,10 @@ Congrats! You have successfully grafted a subgraph onto another subgraph. If you want more experience with grafting, here are a few examples for popular contracts: -- [वक्र](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml) +- [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml) - [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) - [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml), To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results -> टीप: या लेखातील बरीच सामग्री पूर्वी प्रकाशित [Arweave लेख](/subgraphs/cookbook/arweave/) मधून घेतली गेली आहे +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From d8fa27795b1510d48b52227061c4b769738a40cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:17 -0500 Subject: [PATCH 0671/1534] New translations grafting.mdx (Hindi) --- .../pages/hi/subgraphs/cookbook/grafting.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/grafting.mdx b/website/src/pages/hi/subgraphs/cookbook/grafting.mdx index cebcc5f1c533..c0703bcfb101 100644 --- a/website/src/pages/hi/subgraphs/cookbook/grafting.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/grafting.mdx @@ -20,7 +20,7 @@ title: एक कॉन्ट्रैक्ट बदलें और उसक अधिक जानकारी के लिए आप देख सकते हैं: -- [ग्राफ्टिंग](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) +- [Grafting](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) इस ट्यूटोरियल में, हम एक बुनियादी उपयोग मामले को कवर करेंगे। हम एक मौजूदा कॉन्ट्रैक्ट को एक समान कॉन्ट्रैक्ट (नए पते के साथ, लेकिन वही कोड) से बदलेंगे। फिर, मौजूदा Subgraph को "बेस" Subgraph पर जोड़ेंगे, जो नए कॉन्ट्रैक्ट को ट्रैक करता है। @@ -44,13 +44,13 @@ By adhering to these guidelines, you minimize risks and ensure a smoother migrat Building subgraphs is an essential part of The Graph, described more in depth [here](/subgraphs/quick-start/). 
To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: -- [सब-ग्राफ उदाहरण रेपो](https://github.com/Shiyasmohd/grafting-tutorial) +- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) -> नोट: सब-ग्राफ में इस्तेमाल किया गया कॉन्ट्रैक्ट निम्न [हैकथॉन आरम्भक](https://github.com/schmidsi/hackathon-starterkit) से लिया गया है| +> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). ## सब ग्राफ मैनिफेस्ट की परिभाषा -सब ग्राफ मैनिफेस्ट `subgraph.yaml` द्वारा सब-ग्राफ के डाटा सोर्स, ट्रिगर ऑफ़ इंटरेस्ट, और इन ट्रिगर के जवाब में सिस्टेमाल होने वाले फंक्शन्स की पहचान करता है| आपके द्वारा इस्तेमाल किये जाने वाले सब-ग्राफ मैनिफेस्ट का उदाहरण नीचे देखें: +The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: ```yaml specVersion: 0.0.4 @@ -79,9 +79,9 @@ dataSources: file: ./src/lock.ts ``` -- `Lock` डाटा सोर्स वह ऐ बी आई और कॉन्ट्रैक्ट एड्रेस है जो कि हमे तब मिलेगा जब हम अपना कॉन्ट्रैक्ट संकलित और तैनात करते हैं| -- नेटवर्क को उस इंडेक्स किए गए नेटवर्क के अनुरूप होना चाहिए जिसे क्वेरी किया जा रहा है। चूंकि हम **Sepolia** टेस्टनेट पर काम कर रहे हैं, नेटवर्क `sepolia` है। -- `mapping` सेक्शन उन ट्रिगर ऑफ़ इंटरेस्ट और उनके जवाब में चलने वाले फंक्शन्स को परिभासित करता है| इस स्थिति में, हम `Withdrawal` फंक्शन को सुनते हैं और `handleWithdrawal` फंक्शन को कॉल करते हैं| +- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract +- The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` +- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. ## ग्राफ्टिंग मैनिफेस्ट की परिभाषा @@ -97,14 +97,14 @@ graft: ``` - `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:`: `base` सब-ग्राफ और उसके ऊपर ग्राफ्ट किये जाने वाले ब्लॉक का मैप है| `block` वह ब्लॉक संख्या है जहाँ से इंडेक्सिंग स्टार्ट करनी है| द ग्राफ बेस सब-ग्राफ तक के डाटा को कॉपी करके, जिसमे दिया हुआ ब्लॉक भी शामिल है, फिर आगे के नए सब-ग्राफ को उस ब्लॉक से इंडेक्स करना शुरू कर देता है| +- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. -`base` और `block` मूल्यों को दो सब-ग्राफ्स को डेप्लॉय करके पता किया जा सकता है: एक बेस इंडेक्सिंग के लिए और एक ग्राफ्टिंग के लिए| +The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting ## बेस सब-ग्राफ को तैनात करना 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. अपने सब-ग्राफ पेज पर `AUTH & DEPLOY` भाग में `graft-example` फोल्डर में दिए गए दिशा निर्देशों का पालन करें| +2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo 3. 
एक बार पूरा होने पर, सत्यापित करें की इंडेक्सिंग सही ढंग से हो गयी है| यदि आप निम्न कमांड ग्राफ प्लेग्राउंड में चलाते हैं ```graphql @@ -146,7 +146,7 @@ graft: 1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` 2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. अपने सब-ग्राफ पेज पर `AUTH & DEPLOY` भाग में `graft-replacement` फोल्डर में दिए गए दिशा निर्देशों का पालन करें| +3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo 4. एक बार पूरा होने पर, सत्यापित करें की इंडेक्सिंग सही ढंग से हो गयी है| यदि आप निम्न कमांड ग्राफ प्लेग्राउंड में चलाते हैं ```graphql @@ -189,14 +189,14 @@ You can see that the `graft-replacement` subgraph is indexing from older `graph- Congrats! You have successfully grafted a subgraph onto another subgraph. -## अतिरिक्त संसाधन +## Additional Resources यदि आप grafting के साथ अधिक अनुभव प्राप्त करना चाहते हैं, तो यहां कुछ लोकप्रिय कॉन्ट्रैक्ट्स के उदाहरण दिए गए हैं: -- [कर्व](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml) -- [इ आर सी - 721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) +- [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml) +- [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) - [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml), To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. 
Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results -> नोट: इस आर्टिकल में काफी सामग्री पहले से प्रकाशित [आरवीव आर्टिकल](/subgraphs/cookbook/arweave/) से ली गयी है| +> Note: A lot of material from this article was taken from the previously published [Arweave article](/subgraphs/cookbook/arweave/) From aa5008bccd3accb0cce2292808d77b017df412d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:18 -0500 Subject: [PATCH 0672/1534] New translations immutable-entities-bytes-as-ids.mdx (Romanian) --- .../ro/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ro/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/ro/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood From 05d49078ab67d5a6fee0757388507be717f22d69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:19 -0500 Subject: [PATCH 0673/1534] New translations immutable-entities-bytes-as-ids.mdx (French) --- .../immutable-entities-bytes-as-ids.mdx | 63 ++++++++++--------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/fr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..080edb2544e9 100644 --- a/website/src/pages/fr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,14 +1,15 @@ --- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +title: Bonne pratique pour les subgraphs 3 - Améliorer l'Indexation et les Performances de Recherche en Utilisant des Entités Immuables et des Bytes comme IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. 
+Utiliser des Entités Immuables et des Bytes pour les IDs dans notre fichier `schema.graphql` [améliore considérablement ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) la vitesse d'indexation et les performances de recherche. -## Immutable Entities +## Entités Immuables -To make an entity immutable, we simply add `(immutable: true)` to an entity. +Pour rendre une entité immuable, il suffit d'ajouter `(immutable: true)` à cette entité. ```graphql type Transfer @entity(immutable: true) { @@ -19,21 +20,21 @@ type Transfer @entity(immutable: true) { } ``` -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. +En rendant l'entité `Transfer` immuable, graph-node est capable de traiter l'entité plus efficacement, améliorant la vitesse d'indexation et la réactivité des requêtes. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. -### Under the hood +### Sous le capot -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. +Les entités mutables ont une "plage de blocs" indiquant leur validité. La mise à jour de ces entités nécessite que le graph node ajuste la plage de blocs des versions précédentes, augmentant la charge de travail de la base de données. Les requêtes nécessitent également un filtrage pour trouver uniquement les entités actives. Les entités immuables sont plus rapides car elles sont toutes actives et, puisqu'elles ne changeront pas, aucun contrôle ou mise à jour n'est requis lors de l'écriture, et aucun filtrage n'est requis pendant les requêtes. -### When not to use Immutable Entities +### Quand ne pas utiliser les Entités Immuables -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. +Si vous avez un champ comme `status` qui doit être modifié au fil du temps, alors vous ne devriez pas rendre l'entité immuable. Autrement, vous devriez utiliser des entités immuables dès que possible. -## Bytes as IDs +## Bytes comme IDs -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. +Chaque entité nécessite un ID. Dans l'exemple précédent, nous pouvons voir que l'ID est déjà du type Bytes. 
```graphql type Transfer @entity(immutable: true) { @@ -44,19 +45,19 @@ type Transfer @entity(immutable: true) { } ``` -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. +Bien que d'autres types d'ID soient possibles, tels que String et Int8, il est recommandé d'utiliser le type Bytes pour tous les IDs en raison des chaînes de caractères prenant deux fois plus d'espace que les chaînes Byte pour stocker des données binaires, et les comparaisons de chaînes de caractères UTF-8 doivent tenir compte de la locale, ce qui est beaucoup plus coûteux que la comparaison binaire utilisée pour comparer les chaînes de caractères Byte. -### Reasons to Not Use Bytes as IDs +### Raisons de ne pas utiliser les Bytes comme IDs -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. +1. Si les IDs d'entité doivent être lisibles par les humains, comme les IDs numériques auto-incrémentés ou les chaînes lisibles, les Bytes pour les IDs ne doivent pas être utilisés. +2. Si nous intégrons des données d'un subgraph avec un autre modèle de données qui n'utilise pas les Bytes comme IDs, les Bytes comme IDs ne doivent pas être utilisés. +3. Les améliorations de performances d'indexation et de recherche ne sont pas souhaitées. -### Concatenating With Bytes as IDs +### Concatenation Avec Bytes comme IDs -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. +Il est courant dans de nombreux subgraphs d'utiliser la concaténation de chaînes de caractères pour combiner deux propriétés d'un événement en un seul ID, comme utiliser `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`.. Cependant, comme cela retourne une chaîne de caractères, cela nuit considérablement à la performance d'indexation et de recherche des subgraphs. -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. +Au lieu de cela, nous devrions utiliser la méthode `concatI32()` pour concaténer les propriétés des événements. Cette stratégie donne un ID de type Bytes beaucoup plus performant. ```typescript export function handleTransfer(event: TransferEvent): void { @@ -73,11 +74,11 @@ export function handleTransfer(event: TransferEvent): void { } ``` -### Sorting With Bytes as IDs +### Tri avec Bytes comme IDs -Sorting using Bytes as IDs is not optimal as seen in this example query and response. +Le tri utilisant les Bytes comme IDs n'est pas optimal, comme le montre cet exemple de requête et de réponse. -Query: +Requête: ```graphql { @@ -90,7 +91,7 @@ Query: } ``` -Query response: +Réponse de la requête: ```json { @@ -119,9 +120,9 @@ Query response: } ``` -The IDs are returned as hex. 
+Les IDs sont renvoyés sous forme hexadécimale. -To improve sorting, we should create another field on the entity that is a BigInt. +Pour améliorer le tri, nous devrions créer un autre champ sur l'entité qui est un BigInt. ```graphql type Transfer @entity { @@ -133,9 +134,9 @@ type Transfer @entity { } ``` -This will allow for sorting to be optimized sequentially. +Ceci permettra d'optimiser le tri de manière séquentielle. -Query: +Requête: ```graphql { @@ -146,7 +147,7 @@ Query: } ``` -Query Response: +Réponse de la requête: ```json { @@ -171,9 +172,9 @@ Query Response: ## Conclusion -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. +L'utilisation à la fois d' Entités immuables et de Bytes en tant qu'IDs a montré une amélioration marquée de l'efficacité des subgraphs. Plus précisément, des tests ont mis en évidence une augmentation de 28% des performances des requêtes et une accélération de 48% des vitesses d'indexation. -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). +En savoir plus sur l'utilisation des Entités immuables et des Bytes en tant qu'IDs dans cet article de blog de David Lutterkort, un ingénieur logiciel chez Edge & Node : [Deux améliorations simples des performances des subgraphs](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). ## Subgraph Best Practices 1-6 From 8a7260a2a3ba2217eff6c3b2768ab076e301332f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:20 -0500 Subject: [PATCH 0674/1534] New translations immutable-entities-bytes-as-ids.mdx (Spanish) --- .../es/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/es/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/es/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/es/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
### Under the hood From 9e10bf817c986a26fccfd08e172b3b9f91b56aa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:21 -0500 Subject: [PATCH 0675/1534] New translations immutable-entities-bytes-as-ids.mdx (Arabic) --- .../ar/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ar/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/ar/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood From 520295c06453bfba92844d90638a00d05ba7b88a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:22 -0500 Subject: [PATCH 0676/1534] New translations immutable-entities-bytes-as-ids.mdx (Czech) --- .../cs/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/cs/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index f63f6ba6fb03..6465a96d5328 100644 --- a/website/src/pages/cs/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Osvědčený postup 3 - Zlepšení indexování a výkonu dotazů pomocí neměnných entit a bytů jako ID +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { Tím, že je entita `Transfer` neměnná, je grafový uzel schopen ji zpracovávat efektivněji, což zvyšuje rychlost indexování a odezvu dotazů. -Struktury neměnných entit se v budoucnu nezmění. Ideální entitou, která by se měla stát nezměnitelnou entitou, by byla entita, která přímo zaznamenává data událostí v řetězci, například událost `Převod` by byla zaznamenána jako entita `Převod`. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
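As a hedged sketch of the guidance above on choosing immutable candidates (the `Account` entity and its field names are illustrative additions, not part of the original examples): an entity that records an onchain event once is a good fit for `immutable: true`, while an entity whose fields are rewritten over time must stay mutable.

```graphql
# Good immutable candidate: written once per onchain event
type Transfer @entity(immutable: true) {
  id: Bytes!
  from: Bytes!
  to: Bytes!
  value: BigInt!
}

# Poor candidate: `balance` is updated on every transfer,
# so this entity needs to remain mutable
type Account @entity {
  id: Bytes!
  balance: BigInt!
}
```
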
### Pod kapotou From 3f8c34108f5568e05b15855f1f422cdbcfa664ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:24 -0500 Subject: [PATCH 0677/1534] New translations immutable-entities-bytes-as-ids.mdx (German) --- .../de/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/de/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/de/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/de/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/de/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood From 378abb61f78fd429c0ca50acaee4f800a773ad01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:25 -0500 Subject: [PATCH 0678/1534] New translations immutable-entities-bytes-as-ids.mdx (Italian) --- .../it/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/it/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/it/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/it/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
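The block-range bookkeeping that the "Under the hood" passages above describe is also what powers time-travel queries against mutable entities. As a hedged illustration (the block number is arbitrary, and the query assumes the deployment still retains history at that block rather than having been pruned):

```graphql
{
  transfers(first: 3, block: { number: 19000000 }) {
    id
    value
  }
}
```

For immutable entities the same lookup is cheaper, since every stored row is live from its creation block onward and no version filtering is needed.
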
### Under the hood From c34cea3c477a8cdaa9f2bd7f3d462ac0087a87d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:26 -0500 Subject: [PATCH 0679/1534] New translations immutable-entities-bytes-as-ids.mdx (Japanese) --- .../ja/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ja/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/ja/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood From 92f1a04f5a6904a49b5429df8e1c266f3710c7a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:27 -0500 Subject: [PATCH 0680/1534] New translations immutable-entities-bytes-as-ids.mdx (Korean) --- .../ko/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ko/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/ko/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
### Under the hood From 744138777d37bb8c35f607832ca136942620b5f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:27 -0500 Subject: [PATCH 0681/1534] New translations immutable-entities-bytes-as-ids.mdx (Dutch) --- .../nl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/nl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/nl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood From fea72cb5bfa4b4e42c84be5406efdc714ff2d745 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:28 -0500 Subject: [PATCH 0682/1534] New translations immutable-entities-bytes-as-ids.mdx (Polish) --- .../pl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/pl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/pl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
### Under the hood From 5267d18a78962b95ca5e2e370acaf7521535d41d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:29 -0500 Subject: [PATCH 0683/1534] New translations immutable-entities-bytes-as-ids.mdx (Portuguese) --- .../immutable-entities-bytes-as-ids.mdx | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/pt/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index 49802e0ea200..fcd967fb1ff3 100644 --- a/website/src/pages/pt/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Boas Práticas de Subgraph 3 - Como Melhorar o Desempenho da Indexação e de Queries com Entidades Imutáveis e Bytes como IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { Ao tornar a entidade `Transfer` imutável, o graph-node pode processá-la com mais eficácia, o que melhora as velocidades de indexação e a capacidade de resposta das queries. -Estruturas de Entidades Imutáveis não mudarão no futuro. Uma entidade ideal para se tornar Imutável seria uma que grava diretamente dados de eventos on-chain; por exemplo, um evento `Transfer` gravado como uma entidade `Transfer`. +Estruturas de Entidades Imutáveis não mudarão no futuro. Uma entidade ideal para tornar Imutável seria uma que grava diretamente dados de eventos on-chain; por exemplo, um evento `Transfer` registrado como uma entidade `Transfer`. ### De dentro da casca @@ -175,16 +176,16 @@ Resposta de query: Leia mais sobre o uso de Entidades Imutáveis e Bytes como IDs nesta publicação por David Lutterkort, Engenheiro de Software na Edge & Node: [Duas Melhorias Simples no Desempenho de Subgraphs](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). -## Subgraph Best Practices 1-6 +## Melhores Práticas para um Subgraph 1 – 6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Pruning: Reduza o Excesso de Dados do Seu Subgraph para Acelerar Queries](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Use o @derivedFrom para Melhorar a Resposta da Indexação e de Queries](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Melhore o Desempenho da Indexação e de Queries com o Uso de Bytes como IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Evite `eth-calls` para Acelerar a Indexação](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplifique e Otimize com Séries Temporais e Agregações](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Lance Hotfixes Mais Rápido com Enxertos](/subgraphs/cookbook/grafting-hotfix/) From bc5e9663c708ac6b32e7ccefe59ae24f63a48a04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:30 -0500 Subject: [PATCH 0684/1534] New translations immutable-entities-bytes-as-ids.mdx (Russian) --- .../immutable-entities-bytes-as-ids.mdx | 81 ++++++++++--------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ru/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..2d8d7c97e666 100644 --- a/website/src/pages/ru/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,14 +1,15 @@ --- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +title: Лучшие практики для субграфов №3 – Улучшение индексирования и производительности запросов с использованием неизменяемых объектов и байтов в качестве идентификаторов +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- -## TLDR +## Краткое содержание -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. +Использование неизменяемых объектов и байтов в качестве идентификаторов в файле `schema.graphql` [значительно улучшает](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) скорость индексирования и производительность запросов. -## Immutable Entities +## Неизменяемые объекты -To make an entity immutable, we simply add `(immutable: true)` to an entity. +Чтобы сделать объект неизменяемым, просто добавьте `(immutable: true)` к объекту. ```graphql type Transfer @entity(immutable: true) { @@ -19,21 +20,21 @@ type Transfer @entity(immutable: true) { } ``` -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. +Сделав объект `Transfer` неизменяемым, graph-node сможет обрабатывать его более эффективно, что улучшит скорость индексирования и отклик на запросы. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Структуры неизменяемых объектов не будут изменяться в будущем. Идеальным кандидатом для превращения в неизменяемый объект может быть объект, который напрямую фиксирует данные событий в блокчейне, например, событие `Transfer`, записываемое как объект `Transfer`. -### Under the hood +### Под капотом -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. +Изменяемые объекты имеют «диапазон блоков», указывающий их актуальность. Обновление таких объектов требует от graph node корректировки диапазона блоков для предыдущих версий, что увеличивает нагрузку на базу данных. 
Запросы также должны фильтровать данные, чтобы находить только актуальные объекты. Неизменяемые объекты работают быстрее, поскольку все они актуальны, и, так как они не изменяются, не требуется никаких проверок или обновлений при записи, а также фильтрации во время выполнения запросов. -### When not to use Immutable Entities +### Когда не следует использовать неизменяемые объекты -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. +Если у Вас есть поле, такое как `status`, которое необходимо изменять с течением времени, то не следует делать объект неизменяемым. В противном случае, используйте неизменяемые объекты, когда это возможно. -## Bytes as IDs +## Использование Bytes в качестве идентификаторов -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. +Каждый объект требует уникального идентификатора. В предыдущем примере мы видим, что идентификатор уже имеет тип Bytes. ```graphql type Transfer @entity(immutable: true) { @@ -44,19 +45,19 @@ type Transfer @entity(immutable: true) { } ``` -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. +Хотя для идентификаторов возможны и другие типы, такие как String и Int8, рекомендуется использовать тип Bytes для всех идентификаторов. Это связано с тем, что строковые данные занимают в два раза больше места, чем строковые данные в формате Byte, а сравнение строк в кодировке UTF-8 требует учета локали, что намного более затратно по сравнению с побайтовым сравнением, используемым для строк типа Byte. -### Reasons to Not Use Bytes as IDs +### Причины, по которым не стоит использовать Bytes как идентификаторы -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. +1. Если идентификаторы объектов должны быть читаемыми для человека, например, автоинкрементированные числовые идентификаторы или читаемые строки, то не следует использовать тип Bytes для идентификаторов. +2. Если данные субграфа интегрируются с другой моделью данных, которая не использует тип Bytes для идентификаторов, то не следует использовать Bytes для идентификаторов в субграфе. +3. Если улучшения производительности индексирования и запросов не являются приоритетом. -### Concatenating With Bytes as IDs +### Конкатенация (объединение) с использованием Bytes как идентификаторов -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. +Это распространенная практика во многих субграфах — использовать конкатенацию строк для объединения двух свойств события в единый идентификатор, например, используя `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. 
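For contrast with the `concatI32()` form shown just below, a minimal sketch of the string-concatenation pattern just described (the helper name is illustrative, not from the source):

```typescript
import { ethereum } from "@graphprotocol/graph-ts"

// Anti-pattern sketch: builds a String ID, which takes more storage and
// compares more slowly than the Bytes ID produced by concatI32().
function stringId(event: ethereum.Event): string {
  return event.transaction.hash.toHex() + "-" + event.logIndex.toString()
}
```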
Однако поскольку это возвращает строку, такой подход значительно ухудшает производительность индексирования и запросов в субграфах. -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. +Вместо этого следует использовать метод `concatI32()` для конкатенации свойств события. Эта стратегия приводит к созданию идентификатора типа `Bytes`, который гораздо более производителен. ```typescript export function handleTransfer(event: TransferEvent): void { @@ -73,11 +74,11 @@ export function handleTransfer(event: TransferEvent): void { } ``` -### Sorting With Bytes as IDs +### Сортировка с использованием идентификаторов Bytes -Sorting using Bytes as IDs is not optimal as seen in this example query and response. +Сортировка с использованием идентификаторов Bytes не является оптимальной, как это видно из примера запроса и ответа. -Query: +Запрос: ```graphql { @@ -90,7 +91,7 @@ Query: } ``` -Query response: +Ответ на запрос: ```json { @@ -119,9 +120,9 @@ Query response: } ``` -The IDs are returned as hex. +Идентификаторы возвращаются в виде шестнадцатеричной строки. -To improve sorting, we should create another field on the entity that is a BigInt. +Чтобы улучшить сортировку, мы должны создать другое поле в объекте, которое будет иметь тип BigInt. ```graphql type Transfer @entity { @@ -133,9 +134,9 @@ type Transfer @entity { } ``` -This will allow for sorting to be optimized sequentially. +Это позволит оптимизировать сортировку в последовательном порядке. -Query: +Запрос: ```graphql { @@ -146,7 +147,7 @@ Query: } ``` -Query Response: +Ответ на запрос: ```json { @@ -169,22 +170,22 @@ Query Response: } ``` -## Conclusion +## Заключение -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. +Использование как неизменяемых объектов, так и Bytes как идентификаторов значительно улучшает эффективность субграфов. В частности, тесты показали увеличение производительности запросов до 28% и ускорение индексирования до 48%. -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). +Читайте больше о применении неизменяемых объектов и Bytes как идентификаторов в этом блоге от Дэвида Луттеркорта, инженера-программиста в Edge & Node: [Два простых способа улучшить производительность субграфов](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). -## Subgraph Best Practices 1-6 +## Лучшие практики для субграфов 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Увеличение скорости запросов с помощью обрезки субграфов](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Улучшение индексирования и отклика запросов с использованием @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Улучшение индексирования и производительности запросов с использованием неизменяемых объектов и байтов в качестве идентификаторов](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Увеличение скорости индексирования путем избегания `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Упрощение и оптимизация с помощью временных рядов и агрегаций](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Использование переноса (графтинга) для быстрого развертывания исправлений](/subgraphs/cookbook/grafting-hotfix/) From b3b34122372414fe7dd1e774c801d5d5c7555b16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:31 -0500 Subject: [PATCH 0685/1534] New translations immutable-entities-bytes-as-ids.mdx (Swedish) --- .../sv/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/sv/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/sv/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
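For the inverse case, a minimal sketch of an entity that must stay mutable; the `Order` entity, its `status` field, and the helper name are assumptions for illustration, following the "when not to use" guidance in these docs:

```typescript
import { Bytes } from "@graphprotocol/graph-ts"
import { Order } from "../generated/schema" // hypothetical mutable entity

// A load-modify-save cycle like this is exactly what immutability rules out:
// graph-node has to version the existing row (adjust its block range) on each
// update, so an entity with an evolving `status` field must stay mutable.
export function markFilled(id: Bytes): void {
  let order = Order.load(id)
  if (order == null) {
    return
  }
  order.status = "FILLED"
  order.save()
}
```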
### Under the hood From 9f2f2f66317909defd9107a521a5d0e7b2da985b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:33 -0500 Subject: [PATCH 0686/1534] New translations immutable-entities-bytes-as-ids.mdx (Turkish) --- .../immutable-entities-bytes-as-ids.mdx | 81 ++++++++++--------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/tr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..c64399088031 100644 --- a/website/src/pages/tr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,14 +1,15 @@ --- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +title: Subgraph Örnek Uygulama 3 - Değişmez Varlıklar ve Byte'ları Kimlik Olarak Kullanarak Endeksleme ve Sorgu Performansını İyileştirin +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- -## TLDR +## Özet -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. +`schema.graphql` dosyamızda Kimlikler için Değişmez Varlıklar ve Byte'lar kullanmak endeksleme hızını ve sorgu performansını [önemli ölçüde](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) arttırır. -## Immutable Entities +## Değişmez Varlıklar -To make an entity immutable, we simply add `(immutable: true)` to an entity. +Bir varlığı değişmez hale getirmek için, varlığa `(immutable: true)` ekleriz. ```graphql type Transfer @entity(immutable: true) { @@ -19,21 +20,21 @@ type Transfer @entity(immutable: true) { } ``` -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. +`Transfer` varlığını değişmez hale getirerek, graph-node'un varlığı daha verimli bir şekilde işlemesini sağlayabiliriz. Bu da endeksleme hızını ve sorgu yanıt verebilirliğini artırır. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Değişmez Varlıkların yapıları gelecekte değişmeyecektir. "Değişmez Varlık" haline getirilecek ideal bir varlık, doğrudan zincir üstü olay verilerini kaydeden bir varlık, (örneğin bir `Transfer` olayının `Transfer` varlığı olarak kaydedilmesi) gibi olabilir. -### Under the hood +### Detaylı İnceleme -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. +Değişebilir varlıkların geçerliliklerini belirten bir 'blok aralığı' bulunur. Bu varlıkları güncellemek, Graph Düğümü'nün önceki versiyonların blok aralığını ayarlamasını gerektirir. Bu da veritabanı iş yükünü artırır. Sorguların da yalnızca canlı ögeleri bulmak için filtrelenmesi gerekir. Değişmez ögeler daha hızlıdır. 
Çünkü hepsi canlıdır ve değişmeyeceklerinden dolayı, yazma sırasında herhangi bir kontrol veya güncellemeye gerek yoktur. Ayrıca, sorgular sırasında herhangi bir filtrelemeye de gerek kalmaz. -### When not to use Immutable Entities +### Değişmez Varlıklar ne zaman kullanmamalı -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. +Eğer zamanla değiştirilmesi gereken `status` gibi bir alanınız varsa, varlığı değişmez yapmamalısınız. Bunun dışında mümkünse değişmez varlıklar kullanmalısınız. -## Bytes as IDs +## ID (Kimlik Numarası) Olarak Bytes -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. +Her varlığın bir ID'si olmalıdır. Önceki örnekte, ID'nin zaten Bytes türünde olduğunu görebiliriz. ```graphql type Transfer @entity(immutable: true) { @@ -44,19 +45,19 @@ type Transfer @entity(immutable: true) { } ``` -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. +ID'ler için String ve Int8 gibi başka türler kullanmak mümkün olsa da, tüm ID'ler için Bytes türünün kullanılması önerilir. Çünkü karakter dizileri ikili (binary) verileri saklamak için Bayt dizilerinden iki kat fazla yer kaplar. Ayrıca, UTF-8 karakter dizilerinin karşılaştırmaları, yerel ayarı hesaba katması gerekliliğinden ötürü, Bayt dizilerinin karşılaştırmalarında kullanılan bayt temelli (bytewise) karşılaştırmalardan çok daha maliyetlidir. -### Reasons to Not Use Bytes as IDs +### ID Olarak Bytes Kullanmama Nedenleri -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. +1. Eğer varlık ID'leri otomatik artırılan sayısal ID'ler veya okunabilir dizeler gibi insan tarafından okunabilir olmalıysa, ID için Bytes kullanılmamalıdır. +2. Bir subgraph'in verilerini Bytes'ı ID olarak kullanmayan başka bir veri modeliyle entegre ediyorsanız, ID için Bayt kullanılmamalıdır. +3. Endeksleme ve sorgulama gibi performans iyileştirmeleri istenmiyorsa. -### Concatenating With Bytes as IDs +### ID Olarak Bytes'ı Başka Bir Özellikle Birleştirme -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. +Birçok subgraph'te, bir olayın iki özelliğini tek bir ID'de birleştirmek için dizi birleştirme kullanmak yaygın bir uygulamadır: örneğin, `event.transaction.hash.toHex() + "-" + event.logIndex.toString()` gibi. Ancak, bu bir dizi döndürdüğü için, subgraph endeksleme ve sorgulama performansını önemli ölçüde engeller. -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+Bunun yerine, olay özelliklerini birleştirmek için `concatI32()` metodunu kullanmalıyız. Bu strateji, çok daha iyi çalışan bir `Bytes` ID ile sonuçlanır. ```typescript export function handleTransfer(event: TransferEvent): void { @@ -73,11 +74,11 @@ export function handleTransfer(event: TransferEvent): void { } ``` -### Sorting With Bytes as IDs +### Bytes'ı ID Olarak Kullanarak Sıralama -Sorting using Bytes as IDs is not optimal as seen in this example query and response. +Bytes'ı ID olarak kullanarak sıralama yapmak, bu örnek sorgu ve yanıtında gördüğümüz gibi ideal değildir. -Query: +Sorgu: ```graphql { @@ -90,7 +91,7 @@ Query: } ``` -Query response: +Sorgu yanıtı: ```json { @@ -119,9 +120,9 @@ Query response: } ``` -The IDs are returned as hex. +ID'ler onaltılık (hex) olarak döndürülür. -To improve sorting, we should create another field on the entity that is a BigInt. +Sıralamayı geliştirmek için varlık üzerinde BigInt türünde başka bir alan oluşturmalıyız. ```graphql type Transfer @entity { @@ -133,9 +134,9 @@ type Transfer @entity { } ``` -This will allow for sorting to be optimized sequentially. +Bu alanı oluşturmak, sıralamanın ardışık olarak optimize edilmesine olanak tanıyacaktır. -Query: +Sorgu: ```graphql { @@ -146,7 +147,7 @@ Query: } ``` -Query Response: +Sorgu Yanıtı: ```json { @@ -169,22 +170,22 @@ Query Response: } ``` -## Conclusion +## Sonuç -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. +Hem Değişmez Varlıklar hem de ID olarak, Bytes kullanmanın subgraph verimliliğini önemli ölçüde artırdığı gösterilmiştir. Özellikle, testlerde sorgu performansında %28'e kadar artış ve endeksleme hızlarında %48'e kadar hızlanma göze çarpmaktadır. -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). +Değişmez Varlıkları ve ID olarak Bytes kullanmak hakkında daha fazla bilgiyi Edge & Node'da Yazılım Mühendisi olan David Lutterkort'un bu blog yazısında bulabilirsiniz: [İki Basit Subgraph Performans İyileştirmesi](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). -## Subgraph Best Practices 1-6 +## Subgraph Örnek Uygulamalar 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Subgraph Budama ile Sorgu Hızını İyileştirin](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [@derivedFrom Kullanarak Endeksleme ve Sorgu Yanıt Hızını Artırın](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Değişmez Varlıklar ve Bytes ID'ler Kullanarak Endeksleme ve Sorgu Performansını Artırın](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Endeksleme Hızını `eth_calls`'den Kaçınarak İyileştirin](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Zaman Serileri ve Bütünleştirme ile Basitleştirin ve Optimize Edin](/subgraphs/cookbook/timeseries/) -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Hızlı Düzeltme Dağıtımı için Aşılama Kullanın](/subgraphs/cookbook/grafting-hotfix/) From 98fb422a228ea3d58dc9deed022ca078bf02556f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:34 -0500 Subject: [PATCH 0687/1534] New translations immutable-entities-bytes-as-ids.mdx (Ukrainian) --- .../uk/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/uk/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/uk/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood From 0049c3f98e6005d82cacda52a2d749cf1cfee630 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:35 -0500 Subject: [PATCH 0688/1534] New translations immutable-entities-bytes-as-ids.mdx (Chinese Simplified) --- .../zh/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/zh/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/zh/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
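A minimal sketch of populating the auxiliary `BigInt` ordering field recommended in the sorting discussion of these docs, since hex-encoded Bytes IDs do not sort numerically; the `blockNumber` field and the helper name are assumptions based on the extended schema shown there:

```typescript
import { BigInt, Bytes } from "@graphprotocol/graph-ts"
import { Transfer } from "../generated/schema"

// The Bytes ID stays the primary key; the BigInt field exists purely so
// queries can order results sequentially. The caller is expected to set the
// remaining Transfer fields before calling save().
export function newOrderedTransfer(id: Bytes, blockNumber: BigInt): Transfer {
  let transfer = new Transfer(id)
  transfer.blockNumber = blockNumber
  return transfer
}
```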
### Under the hood From d7f05375114cb63fad63cff98e2ab559bda98c02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:36 -0500 Subject: [PATCH 0689/1534] New translations immutable-entities-bytes-as-ids.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ur/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/ur/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood From d373fc9e26fa216fcbbbaccd2f4422f74af19683 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:37 -0500 Subject: [PATCH 0690/1534] New translations immutable-entities-bytes-as-ids.mdx (Vietnamese) --- .../vi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/vi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/vi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
### Under the hood From ed611446b6b534e0c41cb8454cbb15f9aef32541 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:38 -0500 Subject: [PATCH 0691/1534] New translations immutable-entities-bytes-as-ids.mdx (Marathi) --- .../mr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/mr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..97efaaeec7c0 100644 --- a/website/src/pages/mr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood From efb4473abc29c65168668bde3ec32e76fe002281 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:39 -0500 Subject: [PATCH 0692/1534] New translations immutable-entities-bytes-as-ids.mdx (Hindi) --- .../cookbook/immutable-entities-bytes-as-ids.mdx | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/hi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx index 763e8ec760e6..f2a6e2567a2a 100644 --- a/website/src/pages/hi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: सबग्राफ सर्वश्रेष्ठ प्रथा 3 - अपरिवर्तनीय संस्थाओं और बाइट्स को आईडी के रूप में उपयोग करके अनुक्रमण और क्वेरी प्रदर्शन में सुधार करें। +sidebarTitle: "Subgraph Best Practice 3: Immutable Entities and Bytes as IDs" --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { Transfer एंटिटी को अमूर्त बनाने से, graph-node एंटिटी को अधिक कुशलता से संसाधित कर सकता है, जिससे indexing गति और क्वेरी की प्रतिक्रिया में सुधार होता है। -Immutable Entities संरचनाएँ भविष्य में नहीं बदलेंगी। एक आदर्श एंटिटी जो अमूर्त एंटिटी बनेगी, वह एंटिटी होगी जो सीधे ऑन-चेन इवेंट डेटा को लॉग कर रही है, जैसे कि Transfer इवेंट को Transfer एंटिटी के रूप में लॉग किया जाना। +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
### हुड के नीचे @@ -90,7 +91,7 @@ Query: } ``` -प्रश्न प्रतिक्रिया: +प्रश्न प्रतिक्रिया: ```json { @@ -135,7 +136,7 @@ type Transfer @entity { इससे क्रमबद्धता को क्रमिक रूप से अनुकूलित करने की अनुमति मिलेगी। -Query +Query: ```graphql { @@ -146,7 +147,7 @@ Query } ``` -प्रश्न प्रतिक्रिया: +प्रश्न प्रतिक्रिया: ```json { From 3985c09873a734aee3b468402fa3f1fa257db2bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:40 -0500 Subject: [PATCH 0693/1534] New translations near.mdx (Romanian) --- website/src/pages/ro/subgraphs/cookbook/near.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/near.mdx b/website/src/pages/ro/subgraphs/cookbook/near.mdx index 23a936406e7b..6060eb27e761 100644 --- a/website/src/pages/ro/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/near.mdx @@ -12,7 +12,7 @@ This guide is an introduction to building subgraphs indexing smart contracts on The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account From 6eb57995a33c23eb4d4118614ffb7f1a6beb0a6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:41 -0500 Subject: [PATCH 0694/1534] New translations near.mdx (French) --- .../src/pages/fr/subgraphs/cookbook/near.mdx | 226 +++++++++--------- 1 file changed, 113 insertions(+), 113 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/near.mdx b/website/src/pages/fr/subgraphs/cookbook/near.mdx index 746a00050407..90faae315cd7 100644 --- a/website/src/pages/fr/subgraphs/cookbook/near.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/near.mdx @@ -2,7 +2,7 @@ title: Construction de subgraphs sur NEAR --- -Ce guide est une introduction à la construction de subgraphs indexant des contrats intelligents sur la [blockchain NEAR](https://docs.near.org/). +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## Que signifie NEAR ? @@ -10,9 +10,9 @@ Ce guide est une introduction à la construction de subgraphs indexant des contr ## Que sont les subgraphs NEAR ? -Le Graph donne aux développeurs des outils pour traiter les événements de la blockchain et rendre les données résultantes facilement disponibles via une API GraphQL, connue individuellement comme un subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable de traiter les événements NEAR, ce qui signifie que les développeurs NEAR peuvent désormais construire des subgraphs pour indexer leurs smart contracts. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Les subgraphs sont basés sur des événements, ce qui signifie qu'ils écoutent et traitent les événements de la chaîne. Il existe actuellement deux types de gestionnaires pour les subgraphs NEAR : +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Gestionnaires de blocs : ceux-ci sont exécutés à chaque nouveau bloc - Gestionnaires de reçus : exécutés à chaque fois qu'un message est exécuté sur un compte spécifié @@ -23,19 +23,19 @@ Les subgraphs sont basés sur des événements, ce qui signifie qu'ils écoutent ## Construction d'un subgraph NEAR -`@graphprotocol/graph-cli` est un outil en ligne de commande pour construire et déployer des subgraphs. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`@graphprotocol/graph-ts` est une bibliothèque de types spécifiques aux subgraphs. +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -Le développement du subgraph NEAR nécessite `graph-cli` à partir de la version `0.23.0` et `graph-ts` à partir de la version `0.23.0`. +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. > La construction d'un subgraph NEAR est très similaire à la construction d'un subgraph qui indexe Ethereum. La définition d'un subgraph comporte trois aspects : -**subgraph.yaml** : le manifeste du subgraph, définissant les sources de données d'intérêt et la manière dont elles doivent être traitées. NEAR est un nouveau `type` de source de données. +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql** : un fichier de schéma qui définit quelles données sont stockées pour votre subgraph, et comment les interroger via GraphQL. Les exigences pour les subgraphs NEAR sont couvertes par la [documentation existante](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. @@ -43,127 +43,127 @@ Lors du développement du subgraph, il y a deux commandes clés : ```bash $ graph codegen # génère des types à partir du fichier de schéma identifié dans le manifeste -$ graph build # génère le Web Assembly à partir des fichiers AssemblyScript, et prépare tous les fichiers de subgraphs dans un dossier /build +$ graph build # génère le Web Assembly à partir des fichiers AssemblyScript, et prépare tous les fichiers de subgraphes dans un dossier /build ``` ### Définition du manifeste du subgraph -Le manifeste de subgraph (`subgraph.yaml`) identifie les sources de données pour le subgraph, les déclencheurs d'intérêt et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. 
Voici un exemple de manifeste de subgraph pour un subgraph NEAR: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: ```yaml -specVersion: 0.0.2 -schema: - file: ./src/schema.graphql # lien vers le fichier de schéma -dataSources: - - kind: near - network: near-mainnet - source: - account: app.good-morning.near # Cette source de données surveillera ce compte - startBlock: 10662188 # Requis pour NEAR - mapping: - apiVersion: 0.0.5 - language: wasm/assemblyscript - blockHandlers: - - handler: handleNewBlock # le nom de la fonction dans le fichier de mapping - receiptHandlers: - - handler: handleReceipt # le nom de la fonction dans le fichier de mappage - file: ./src/mapping.ts # lien vers le fichier contenant les mappings Assemblyscript +specVersion : 0.0.2 +schema : + file : ./src/schema.graphql # lien vers le fichier de schéma +dataSources : + - kind : near + network : near-mainnet + source : + account : app.good-morning.near # Cette source de données surveillera ce compte + startBlock : 10662188 # Requis pour NEAR + mapping : + apiVersion : 0.0.5 + language : wasm/assemblyscript + blockHandlers : + - handler : handleNewBlock # le nom de la fonction dans le fichier de mapping + receiptHandlers : + - handler : handleReceipt # le nom de la fonction dans le fichier de mappage + file : ./src/mapping.ts # lien vers le fichier contenant les mappings Assemblyscript ``` -- Les subgraphs NEAR introduisent un nouveau `type` de source de données (`near`) +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- Les sources de données NEAR introduisent un champ `source.accounts` facultatif alternatif, qui contient des suffixes et des préfixes facultatifs. Au moins un préfixe ou un suffixe doit être spécifié, ils correspondront respectivement à n'importe quel compte commençant ou se terminant par la liste de valeurs. L'exemple ci-dessous correspondrait : `[app|good].*[morning.near|morning.testnet]`. Si seule une liste de préfixes ou de suffixes est nécessaire, l'autre champ peut être omis. +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml comptes: - préfixes: - - application - - bien - suffixes: - - matin.près - - matin.testnet + préfixes : + - application + - bien + suffixes : + - matin.près + - matin.testnet ``` Les fichiers de données NEAR prennent en charge deux types de gestionnaires : -- `blockHandlers` : s'exécute sur chaque nouveau bloc NEAR. Aucun `source.account` n'est requis. +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. 
- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). -### La Définition du schema +### Définition de schéma -La définition du schema décrit la structure de la base de données de subgraphs résultante et les relations entre les entités. Ceci est indépendant de la source de données originale. Vous trouverez plus de détails sur la définition du schema des subgraph [ici](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). -### Les Cartographies d'AssemblyScript +### Cartographies AssemblyScript -Les gestionnaires de traitement des événements sont écrits dans l'[AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). ```typescript -classe ExecutionOutcome { - gaz brûlé : u64, - blockHash : octets, - identifiant : octets, - logs : Array, - Id de réception : tableau , - jetonsBurnt : BigInt, - exécuteurId : chaîne, - } - -classe ActionReceipt { - Id prédécesseur : chaîne, - Id récepteur : chaîne, - identifiant : CryptoHash, - signataire : chaîne, - prix du gaz : BigInt, - OutputDataReceivers : Array, - inputDataIds : tableau, - actions : Tableau, - } - -classe BlockHeader { - taille: u64, - prevHeight : u64,// Toujours zéro lorsque la version < V3 - epochId : octets, - nextEpochId : octets, - morceauxInclus: u64, - hachage : octets, - prevHash : octets, - horodatageNanosec : u64, - randomValue : octets, - prix du gaz : BigInt, - approvisionnement total : BigInt, - dernière version du protocole : u32, - } - -classe ChunkHeader { - gazUtilisé: u64, - limite de gaz : u64, - Id de fragment : u64, - chunkHash : octets, - prevBlockHash : octets, - balanceBurnt : BigInt, - } - -bloc de classe { - auteur : chaîne, - en-tête : BlockHeader, - morceaux : Array, - } - -classe ReçuAvecRésultat { - résultat : ExecutionOutcome, - reçu : ActionReceipt, - bloquer: bloquer, - } +class ExecutionOutcome { + gasBurnt: u64, + blockHash: Bytes, + id: Bytes, + logs: Array, + receiptIds: Array, + tokensBurnt: BigInt, + executorId: string, + } + +class ActionReceipt { + predecessorId: string, + receiverId: string, + id: CryptoHash, + signerId: string, + gasPrice: BigInt, + outputDataReceivers: Array, + inputDataIds: Array, + actions: Array, + } + +class BlockHeader { + height: u64, + prevHeight: u64,// Always zero when version < V3 + epochId: Bytes, + nextEpochId: Bytes, + chunksIncluded: u64, + hash: Bytes, + prevHash: Bytes, + timestampNanosec: u64, + randomValue: Bytes, + gasPrice: BigInt, + totalSupply: BigInt, + latestProtocolVersion: u32, + } + +class ChunkHeader { + gasUsed: u64, + gasLimit: u64, + shardId: u64, + chunkHash: Bytes, + prevBlockHash: Bytes, + balanceBurnt: BigInt, + } + +class Block { + author: string, + header: BlockHeader, + chunks: Array, + } + +class ReceiptWithOutcome { + outcome: ExecutionOutcome, + receipt: ActionReceipt, + block: Block, + } ``` -Ces types sont passés 
au bloc & gestionnaires de reçus : +These types are passed to block & receipt handlers: -- Les gestionnaires de blocs reçoivent un `Block` -- Les gestionnaires de reçus reçoivent un `ReceiptWithOutcome` +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. @@ -171,9 +171,9 @@ This includes a new JSON parsing function - logs on NEAR are frequently emitted ## Déploiement d'un subgraph NEAR -Une fois que vous avez construit un subgraph, il est temps de le déployer sur Graph Node pour l'indexation. Les subgraphs NEAR peuvent être déployés sur n'importe quel nœud The Graph `>=v0.26.x` (cette version n'a pas encore été marquée & et publiée). +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). -Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: +Subgraph Studio et l'Indexeur de mise à niveau sur The Graph Network prennent en charge actuellement l'indexation du mainnet et du testnet NEAR en bêta, avec les noms de réseau suivants : - `near-mainnet` - `near-testnet` @@ -182,7 +182,7 @@ More information on creating and deploying subgraphs on Subgraph Studio can be f As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -Une fois votre subgraph créé, vous pouvez le déployer en utilisant la commande CLI `graph deploy` : +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,17 +228,17 @@ Nous fournirons bientôt plus d'informations sur l'utilisation des composants ci ## Interrogation d'un subgraph NEAR -Le point de terminaison GraphQL pour les subgraphs NEAR est déterminé par la définition du schéma, avec l'interface API existante. Veuillez consulter la [documentation de l'API GraphQL](/subgraphs/querying/graphql-api/) pour plus d'informations. +The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. -## Des exemples de subgraphs +## Exemples de subgraphs -Here are some example subgraphs for reference: +Voici quelques exemples de subgraphs pour référence : -[NEAR Blocs](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[Des reçus de NEAR](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## QFP +## FAQ ### Comment fonctionne la bêta ? @@ -254,12 +254,12 @@ Actuellement, seuls les déclencheurs de blocage et de réception sont pris en c ### Les gestionnaires de reçus se déclencheront-ils pour les comptes et leurs sous-comptes ? -Si un `compte` est spécifié, il correspondra uniquement au nom exact du compte. 
Il est possible de faire correspondre des sous-comptes en spécifiant un champ `comptes`, avec des `suffixes` et des `préfixes` spécifiés pour faire correspondre les comptes et sous-comptes, par exemple ce qui suit correspondrait à tous les sous-comptes `mintbase1.near` : +If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: ```yaml comptes: - suffixes: - - mintbase1.near + suffixes : + - mintbase1.near ``` ### Les subgraphs NEAR peuvent-ils faire des appels de view aux comptes NEAR pendant les mappings? @@ -276,8 +276,8 @@ La fonctionnalité "pending" n'est pas encore prise en charge pour les subgraphs ### Ma question n'a pas reçu de réponse, où puis-je obtenir plus d'aide concernant la création de subgraphs NEAR ? -S'il s'agit d'une question générale sur le développement de subgraphs, il y a beaucoup plus d'informations dans le reste de la [Documentation du développeur](/subgraphs/quick-start/). Sinon, veuillez rejoindre [The Graph Protocol Discord](https://discord.gg/graphprotocol) et poser votre question sur le canal #near ou par e-mail à near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Les Références -- [Documentation du développeur NEAR](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From 707db00927abd0d9b4aaa50a104b8873389be657 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:42 -0500 Subject: [PATCH 0695/1534] New translations near.mdx (Spanish) --- .../src/pages/es/subgraphs/cookbook/near.mdx | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/near.mdx b/website/src/pages/es/subgraphs/cookbook/near.mdx index a134eed6b560..67db2b1278cb 100644 --- a/website/src/pages/es/subgraphs/cookbook/near.mdx +++ b/website/src/pages/es/subgraphs/cookbook/near.mdx @@ -2,7 +2,7 @@ title: Construcción de subgrafos en NEAR --- -Esta guía es una introducción a la construcción de subgrafos que indexan contratos inteligentes en la [blockchain NEAR ](https://docs.near.org/). +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## ¿Qué es NEAR? @@ -10,9 +10,9 @@ Esta guía es una introducción a la construcción de subgrafos que indexan cont ## ¿Qué son los subgrafos NEAR? -The Graph brinda a los desarrolladores herramientas para procesar eventos de blockchain y hacer que los datos resultantes estén fácilmente disponibles a través de una API GraphQL, conocido individualmente como subgrafo. [Graph Node](https://github.com/graphprotocol/graph-node) ahora puede procesar eventos NEAR, lo que significa que los desarrolladores de NEAR ahora pueden crear subgrafos para indexar sus contratos inteligentes. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Los subgrafos se basan en eventos, lo que significa que escuchan y procesan los eventos on-chain. Actualmente hay dos tipos de handlers compatibles con los subgrafos NEAR: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Handlers de bloques: se ejecutan en cada nuevo bloque - Handlers de recibos: se realizan cada vez que se ejecuta un mensaje en una cuenta específica @@ -23,19 +23,19 @@ Los subgrafos se basan en eventos, lo que significa que escuchan y procesan los ## Construcción de un subgrafo NEAR -`@graphprotocol/graph-cli` es una herramienta de línea de comandos para crear e implementar subgrafos. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`@graphprotocol/graph-ts` es una biblioteca de tipos específicos de subgrafos. +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -El desarrollo de subgrafos NEAR requiere `graph-cli` versión superior a `0.23.0` y `graph-ts` versión superior a `0.23.0`. +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. > Construir un subgrafo NEAR es muy similar a construir un subgrafo que indexa Ethereum. Hay tres aspectos de la definición de subgrafo: -**subgraph.yaml:** el manifiesto del subgrafo, que define las fuentes de datos de interés y cómo deben procesarse. NEAR es un nuevo `tipo` de fuente de datos. +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** un archivo de esquema que define qué datos se almacenan para su subgrafo y cómo consultarlos a través de GraphQL. Los requisitos para los subgrafos NEAR están cubiertos por [la documentación existente](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. @@ -48,7 +48,7 @@ $ graph build # genera Web Assembly a partir de los archivos de AssemblyScript y ### Definición de manifiesto del subgrafo -El manifiesto del subgrafo (`subgraph.yaml`) identifica las fuentes de datos para el subgrafo, los activadores de interés y las funciones que deben ejecutarse en respuesta a esos activadores. Consulta a continuación un manifiesto de subgrafo de ejemplo para un subgrafo NEAR: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example subgraph manifest for a NEAR subgraph: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Los subgrafos NEAR introducen un nuevo `tipo` de fuente de datos (`near`) +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- Las fuentes de datos NEAR introducen un campo alternativo opcional `source.accounts`, que contiene sufijos y prefijos opcionales. Se debe especificar al menos el prefijo o el sufijo, coincidirán con cualquier cuenta que comience o termine con la lista de valores, respectivamente. El siguiente ejemplo coincidiría con: `[app|good].*[morning.near|morning.testnet]`. Si solo se necesita una lista de prefijos o sufijos, se puede omitir el otro campo. +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml accounts: @@ -87,16 +87,16 @@ accounts: Las fuentes de datos NEAR admiten dos tipos de handlers: -- `blockHandlers`: se ejecuta en cada nuevo bloque NEAR. No se requiere `source.account`. +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). ### Definición de esquema -La definición de esquema describe la estructura de la base de datos de subgrafos resultante y las relaciones entre las entidades. Esto es independiente de la fuente de datos original. Hay más detalles sobre la definición de esquema de subgrafo [aquí](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### Asignaciones de AssemblyScript -Los handlers para procesar eventos están escritos en [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). 
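Since this excerpt lists the NEAR-specific types but shows no handler bodies, here is a minimal, hedged sketch of both handler kinds. It relies only on the fields listed in the class definitions above; the entity mapping is left out, and the JSON parsing follows the docs' note that NEAR logs are frequently emitted as stringified JSON:

```typescript
import { json, log, near } from "@graphprotocol/graph-ts"

// Runs for every new NEAR block when a blockHandler is declared.
export function handleBlock(block: near.Block): void {
  let header = block.header
  log.info("Indexed block {} by author {}", [header.height.toString(), block.author])
}

// Runs for each receipt whose recipient matches source.account(s).
export function handleReceipt(receiptWithOutcome: near.ReceiptWithOutcome): void {
  let receipt = receiptWithOutcome.receipt
  let outcome = receiptWithOutcome.outcome

  log.info("Receipt for {} signed by {}", [receipt.receiverId, receipt.signerId])

  // NEAR contracts frequently emit stringified JSON in their logs; parse
  // defensively with the graph-ts JSON helpers.
  for (let i = 0; i < outcome.logs.length; i++) {
    let parsed = json.try_fromString(outcome.logs[i])
    if (parsed.isOk) {
      // map parsed.value into schema entities here
    }
  }
}
```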
@@ -160,10 +160,10 @@ class ReceiptWithOutcome { } ``` -Estos tipos se pasan a block & handlers de recibos: +These types are passed to block & receipt handlers: -- Los handlers de bloques recibirán un `Block` -- Los handlers de recibos recibirán un `ReceiptWithOutcome` +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. @@ -171,7 +171,7 @@ This includes a new JSON parsing function - logs on NEAR are frequently emitted ## Deployando un subgrafo NEAR -Una vez que hayas creado un subgrafo, es hora de implementarlo en Graph Node para indexarlo. Los subgrafos NEAR se pueden implementar en cualquier Graph Node `>=v0.26.x` (esta versión aún no se ha etiquetado & publicada). +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: @@ -182,7 +182,7 @@ More information on creating and deploying subgraphs on Subgraph Studio can be f As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -Una vez que se haya creado su subgrafo, puede implementar su subgrafo usando el comando CLI `graph deployment`: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,17 +228,17 @@ Pronto proporcionaremos más información sobre cómo ejecutar los componentes a ## Consultando un subgrafo NEAR -El endpoint de GraphQL para los subgrafos NEAR está determinado por la definición del esquema, con la interfaz API existente. Visite la [documentación de la API de GraphQL](/subgraphs/querying/graphql-api/) para obtener más información. +The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Subgrafos de ejemplo -A continuación se presentan algunos ejemplos de subgrafos como referencia: +Here are some example subgraphs for reference: -[Bloques NEAR](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[Recibos NEAR](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## Preguntas frecuentes +## FAQ ### ¿Cómo funciona la beta? @@ -254,7 +254,7 @@ Actualmente, solo se admiten los activadores de Bloque y Recibo. Estamos investi ### ¿Se activarán los handlers de recibos para las cuentas y sus subcuentas? -Si se especifica una `cuenta`, solo coincidirá con el nombre exacto de la cuenta. 
Es posible hacer coincidir subcuentas especificando un campo `cuentas`, con `sufijos` y `prefijos` especificados para hacer coincidir cuentas y subcuentas, por ejemplo lo siguiente coincidiría con todas las subcuentas `mintbase1.near`: +If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: ```yaml accounts: @@ -280,4 +280,4 @@ If it is a general question about subgraph development, there is a lot more info ## Referencias -- [Documentación para desarrolladores de NEAR](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From c0bcff710cfb73536a23e82f965bd0f24b07a341 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:44 -0500 Subject: [PATCH 0696/1534] New translations near.mdx (Arabic) --- .../src/pages/ar/subgraphs/cookbook/near.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/near.mdx b/website/src/pages/ar/subgraphs/cookbook/near.mdx index fe6cbf2223aa..bdbe8e518a6b 100644 --- a/website/src/pages/ar/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/near.mdx @@ -2,7 +2,7 @@ title: بناء Subgraphs على NEAR --- -هذا الدليل عبارة عن مقدمة لبناء subgraphs تقوم بفهرسة العقود الذكية على [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## ما هو NEAR؟ @@ -10,9 +10,9 @@ title: بناء Subgraphs على NEAR ## ماهي NEAR subgraphs؟ -يوفر The Graph للمطورين أدوات لمعالجة أحداث blockchain وجعل البيانات الناتجة متاحة بسهولة عبر GraphQL API ، والمعروفة باسم subgraph. أصبح [ Graph Node ](https://github.com/graphprotocol/graph-node) الآن قادرًا على معالجة أحداث NEAR ، مما يعني أن مطوري NEAR يمكنهم الآن إنشاء subgraphs لفهرسة عقودهم الذكية (smart contracts). +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -تعتمد الـ Subgraphs على الأحداث ، مما يعني أنها تستمع إلى أحداث on-chain ثم تعالجها. يوجد حاليًا نوعان من المعالجات المدعومة لـ NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - معالجات الكتل(Block handlers): يتم تشغيلها على كل كتلة جديدة - معالجات الاستلام (Receipt handlers): يتم تشغيلها في كل مرة يتم فيها تنفيذ رسالة على حساب محدد @@ -25,15 +25,15 @@ title: بناء Subgraphs على NEAR `@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`graphprotocol/graph-ts@` هي مكتبة لأنواع خاصة بـ subgraph. +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -تطوير NEAR subgraph يتطلب `graph-cli` بإصدار أعلى من `0.23.0` و `graph-ts` بإصدار أعلى من `0.23.0`. +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. 
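As a minimal sketch, assuming an npm-based project, installing compatible tooling could look like this; any release above `0.23.0` satisfies the requirement:

```sh
$ npm install -g @graphprotocol/graph-cli # the `graph` command-line tool
$ npm install --save @graphprotocol/graph-ts # subgraph-specific types for mappings
$ graph --version # confirm the installed CLI is above 0.23.0
```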
> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. هناك ثلاثة جوانب لتعريف الـ subgraph: -**subgraph.yaml:** الـ subgraph manifest ، وتحديد مصادر البيانات ذات الأهمية ، وكيف يجب أن تتم معالجتها.علما أن NEAR هو `نوع` جديد لمصدر البيانات. +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. **schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). @@ -70,7 +70,7 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs يقدم `نوعا ` جديدا من مصدر بيانات (`near`) +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. - NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. @@ -87,7 +87,7 @@ accounts: مصادر بيانات NEAR تدعم نوعين من المعالجات: -- `blockHandlers`: يتم تشغيلها على كل كتلة NEAR جديدة. لا يتطلب `source.account`. +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). ### تعريف المخطط @@ -96,7 +96,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript Mappings -تمت كتابة المعالجات(handlers) الخاصة بمعالجة الأحداث بـ[ AssemblyScript ](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -162,8 +162,8 @@ class ReceiptWithOutcome { These types are passed to block & receipt handlers: -- معالجات الكتلة ستتلقى`Block` -- معالجات الاستلام ستتلقى`ReceiptWithOutcome` +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. @@ -171,7 +171,7 @@ This includes a new JSON parsing function - logs on NEAR are frequently emitted ## نشر NEAR Subgraph -بمجرد امتلاكك لـ subgraph، فقد حان الوقت لنشره في Graph Node للفهرسة. يمكن نشر NEAR subgraphs في اصدارات Graph Node `>=v0.26.x` (لم يتم وضع علامة(tag) على هذا الإصدار ولم يتم إصداره بعد). 
+Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: @@ -182,7 +182,7 @@ More information on creating and deploying subgraphs on Subgraph Studio can be f As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -بمجرد إنشاء الـ subgraph الخاص بك ، يمكنك نشره باستخدام الأمر `graph deploy`: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -238,7 +238,7 @@ Here are some example subgraphs for reference: [NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## الأسئلة الشائعة +## FAQ ### How does the beta work? @@ -280,4 +280,4 @@ If it is a general question about subgraph development, there is a lot more info ## المراجع -- [وثائق مطور NEAR](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From 328520d57073afee5016def9a6e3406795e51436 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:45 -0500 Subject: [PATCH 0697/1534] New translations near.mdx (Czech) --- .../src/pages/cs/subgraphs/cookbook/near.mdx | 66 +++++++++---------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/near.mdx b/website/src/pages/cs/subgraphs/cookbook/near.mdx index 010aeefc48f9..dc65c11da629 100644 --- a/website/src/pages/cs/subgraphs/cookbook/near.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/near.mdx @@ -2,42 +2,42 @@ title: Vytváření podgrafů v NEAR --- -Tato příručka je úvodem do vytváření subgrafů indexujících chytré kontrakty na [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## Co je NEAR? -[NEAR](https://near.org/) je platforma pro chytré smlouvy, která slouží k vytváření decentralizovaných aplikací. Další informace najdete v [oficiální dokumentaci](https://docs.near.org/concepts/basics/protocol). +[NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. ## Co jsou podgrafy NEAR? -"The Graph poskytuje vývojářům nástroje pro zpracování událostí na blockchainu a snadný přístup k výsledným datům prostřednictvím GraphQL API, známého individuálně jako podgraf. [Graph Node](https://github.com/graphprotocol/graph-node) je nyní schopen zpracovávat události na síti NEAR, což znamená, že vývojáři na NEAR mohou nyní vytvářet podgrafy pro indexaci svých chytrých kontraktů." +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Podgrafy jsou založeny na událostech, což znamená, že naslouchají událostem v řetězci a následně je zpracovávají. V současné době jsou pro podgrafy NEAR podporovány dva typy zpracovatelů: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Obsluhy bloků: jsou spouštěny při každém novém bloku. - Obsluhy příjmu: spouštějí se pokaždé, když je zpráva provedena na zadaném účtu. -[Z dokumentace NEAR](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): +[From the NEAR documentation](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): > Příjemka je jediným objektem, který lze v systému použít. Když na platformě NEAR hovoříme o "zpracování transakce", znamená to v určitém okamžiku "použití účtenky". ## Sestavení podgrafu NEAR -`@graphprotocol/graph-cli` je nástroj příkazového řádku pro vytváření a nasazování subgrafů. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`@graphprotocol/graph-ts` je knihovna typů specifických pro podgrafy. +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -Vývoj podgrafů NEAR vyžaduje `graph-cli` nad verzí `0.23.0` a `graph-ts` nad verzí `0.23.0`. +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. > Vytváření subgrafu NEAR je velmi podobné vytváření subgrafu, který indexuje Ethereum. Definice podgrafů má tři aspekty: -**subgraph.yaml:** manifest podgrafu definující zdroje dat, které vás zajímají, a způsob jejich zpracování. NEAR je nový `druh` zdroje dat. +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** soubor se schématem, který definuje, jaká data jsou uložena pro váš podgraf, a jak je možné je dotazovat pomocí GraphQL. Požadavky na podgrafy NEAR jsou pokryty [existující dokumentací](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -**Mapování AssemblyScript:** [Kód AssemblyScript](/subgraphs/developing/creating/graph-ts/api/), který převádí data událostí na entity definované ve vašem schématu. Podpora NEAR zavádí datové typy specifické pro NEAR a nové funkce pro parsování JSON. +**AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. Při vývoji podgrafů existují dva klíčové příkazy: @@ -48,7 +48,7 @@ $ graph build # vygeneruje webové sestavení ze souborů AssemblyScript a přip ### Definice podgrafu Manifest -Manifest podgrafu (`subgraph.yaml`) identifikuje zdroje dat pro podgraf, spouštěče zájmu a funkce, které by měly být spuštěny v reakci na tyto spouštěče. 
Příklad manifestu podgrafu pro podgraf NEAR naleznete níže: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Podgrafy NEAR představují nový `druh` zdroje dat (`near`) -- `Síť` by měla odpovídat síti v hostitelském uzlu Graf. V Podgraf Studio je hlavní síť NEAR `near-mainnet` a testovací síť NEAR je `near-testnet` -- Zdroje dat NEAR zavádějí volitelné pole `source.account`, které je čitelným ID odpovídajícím [účtu NEAR](https://docs.near.org/concepts/protocol/account-model). Může to být účet nebo podúčet. -- NEAR datové zdroje představují alternativní volitelné pole `source.accounts`, které obsahuje volitelné přípony a předpony. Musí být specifikována alespoň jedna z předpony nebo přípony, které odpovídají jakémukoli účtu začínajícímu nebo končícímu uvedenými hodnotami. Příklad níže by odpovídal: `[app|good].*[morning.near|morning.testnet]`. Pokud je potřeba pouze seznam předpon nebo přípon, druhé pole lze vynechat. +- NEAR subgraphs introduce a new `kind` of data source (`near`) +- The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` +- NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml accounts: @@ -87,18 +87,18 @@ accounts: Zdroje dat NEAR podporují dva typy zpracovatelů: -- `blockHandlers`: spustí se na každém novém bloku NEAR. Není vyžadován žádný `source.account`. -- `receiptHandlers`: spustí se na každé příjemce, kde je `účet zdroje dat` příjemcem. Všimněte si, že se zpracovávají pouze přesné shody ([podúčty](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) musí být přidány jako nezávislé zdroje dat). +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. +- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). ### Definice schématu -Definice schématu popisuje strukturu výsledné databáze podgrafů a vztahy mezi entitami. Toto je agnostika původního zdroje dat. Další podrobnosti o definici schématu podgrafu naleznete [zde](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). 
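As an illustration only, a minimal schema for a subgraph that stores receipts could be the sketch below; the `Receipt` entity and its fields are hypothetical names, not requirements:

```graphql
# Hypothetical entity; a real schema reflects your own data model
type Receipt @entity {
  id: ID!
  signerId: String!
  receiverId: String!
}
```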
### AssemblyScript Mapování -Obslužné programy pro zpracování událostí jsou napsány v jazyce [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). -Indexování NEAR zavádí do [API AssemblyScript](/subgraphs/developing/creating/graph-ts/api/) datové typy specifické pro NEAR. +NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). ```typescript @@ -160,18 +160,18 @@ class ReceiptWithOutcome { } ``` -Tyto typy jsou předány do block & obsluha účtenek: +These types are passed to block & receipt handlers: -- Obsluhy bloků obdrží `Block` -- Obsluhy příjmu obdrží `ReceiptWithOutcome` +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` -V opačném případě mají vývojáři podgrafů NEAR během provádění mapování k dispozici zbytek [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. -To zahrnuje novou funkci pro parsování JSON - log na NEAR jsou často emitovány jako serializované JSONs. Nová funkce `json.fromString(...)` je k dispozici jako součást [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api), která umožňuje vývojářům snadno zpracovávat tyto log. +This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## Nasazení podgrafu NEAR -Jakmile máte sestavený podgraf, je čas jej nasadit do Graph Node k indexování. Podgrafy NEAR lze nasadit do libovolného Graph Node `>=v0.26.x` (tato verze ještě nebyla označena & vydána). +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: @@ -182,7 +182,7 @@ More information on creating and deploying subgraphs on Subgraph Studio can be f As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -Po vytvoření podgrafu můžete podgraf nasadit pomocí příkazu `graph deploy` CLI: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,13 +228,13 @@ Brzy vám poskytneme další informace o provozu výše uvedených komponent. ## Dotazování podgrafu NEAR -Koncový bod GraphQL pro podgrafy NEAR je určen definicí schématu se stávajícím rozhraním API. Další informace naleznete v [dokumentaci GraphQL API](/subgraphs/querying/graphql-api/). +The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. 
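Assuming the hypothetical `Receipt` entity sketched earlier, a query could look like this; the generated plural field name follows the usual conventions of the GraphQL API:

```graphql
{
  receipts(first: 5, orderBy: id) {
    id
    signerId
    receiverId
  }
}
```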
## Příklady podgrafů Zde je několik příkladů podgrafů: -[NEAR bloky](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) [NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) @@ -254,7 +254,7 @@ V současné době jsou podporovány pouze spouštěče Blok a Příjem. Zkoumá ### Budou se obsluhy příjmu spouštět pro účty a jejich podúčty? -Pokud je zadán `účet`, bude odpovídat pouze přesnému názvu účtu. Je možné spárovat podúčty zadáním pole `účty` s `příponami` a `předponami`, aby odpovídaly například účtům a podúčtům následující by odpovídalo všem podúčtům `mintbase1.near`: +If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: ```yaml accounts: @@ -276,8 +276,8 @@ Pro podgrafy NEAR zatím nejsou podporovány čekající funkce. V mezidobí mů ### Moje otázka nebyla zodpovězena, kde mohu získat další pomoc při vytváření podgrafů NEAR? -Pokud se jedná o obecnou otázku týkající se vývoje podgrafů, je zde mnohem více informací ve zbytku [dokumentace pro vývojáře](/subgraphs/quick-start/). Jinak se prosím připojte k [The Graph Protocol Discord](https://discord.gg/graphprotocol) a zeptejte se na #near kanálu nebo e-mailem near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Odkazy: -- [ Dokumentace pro vývojáře NEAR](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From 82c5374f42edc0a0d82e6846c0f6b0c267e19888 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:46 -0500 Subject: [PATCH 0698/1534] New translations near.mdx (German) --- website/src/pages/de/subgraphs/cookbook/near.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/near.mdx b/website/src/pages/de/subgraphs/cookbook/near.mdx index 3a79be20752f..d748e4787563 100644 --- a/website/src/pages/de/subgraphs/cookbook/near.mdx +++ b/website/src/pages/de/subgraphs/cookbook/near.mdx @@ -12,7 +12,7 @@ This guide is an introduction to building subgraphs indexing smart contracts on The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -96,7 +96,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript-Mappings -Die Handler für die Ereignisverarbeitung sind in [AssemblyScript](https://www.assemblyscript.org/) geschrieben. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). From b25a7886a2959a0626e892879d022547bdf9d9ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:46 -0500 Subject: [PATCH 0699/1534] New translations near.mdx (Italian) --- website/src/pages/it/subgraphs/cookbook/near.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/cookbook/near.mdx b/website/src/pages/it/subgraphs/cookbook/near.mdx index 97aff10ff8d2..809574aa81cd 100644 --- a/website/src/pages/it/subgraphs/cookbook/near.mdx +++ b/website/src/pages/it/subgraphs/cookbook/near.mdx @@ -12,7 +12,7 @@ This guide is an introduction to building subgraphs indexing smart contracts on The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account From 486b537fd7558e84d4607eef3490007a6da09087 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:47 -0500 Subject: [PATCH 0700/1534] New translations near.mdx (Japanese) --- .../src/pages/ja/subgraphs/cookbook/near.mdx | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/near.mdx b/website/src/pages/ja/subgraphs/cookbook/near.mdx index f20f17ac7ba4..6f4069566be2 100644 --- a/website/src/pages/ja/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/near.mdx @@ -2,7 +2,7 @@ title: NEAR でサブグラフを作成する --- -このガイドは、[NEAR blockchain](https://docs.near.org/)上のスマートコントラクトを索引するサブグラフを構築するための入門書です。 +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## NEAR とは? @@ -10,9 +10,9 @@ title: NEAR でサブグラフを作成する ## NEAR サブグラフとは? -Graph は、ブロックチェーンのイベントを処理し、その結果得られたデータを GraphQL API を介して簡単に利用できるようにするためのツールを開発者に提供するもので、個別にはサブグラフとして知られています。[Graph Node](https://github.com/graphprotocol/graph-node)が NEAR イベントを処理できるようになったということは、NEAR の開発者がスマートコントラクトの指標となるサブグラフを構築できるようになったということです。 +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -サブグラフはイベントベースなので、チェーン上のイベントをリッスンしてから処理します。現在、NEAR サブグラフでサポートされているハンドラーは 2 種類あります: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - ブロックハンドラ:新しいブロックごとに実行されます - レシートハンドラ:指定されたアカウントでメッセージが実行されるたびに実行されます @@ -23,19 +23,19 @@ Graph は、ブロックチェーンのイベントを処理し、その結果 ## NEAR サブグラフの構築 -`@graphprotocol/graph-cli`は、サブグラフを構築・展開するためのコマンドラインツールです。 +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`@graphprotocol/graph-ts`は、サブグラフ固有の型のライブラリです。 +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -NEAR サブグラフの開発には、バージョン`0.23.0`以上の`graph-cli`と、バージョン`0.23.0`以上の`graph-ts`が必要です。 +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. > NEAR サブグラフの構築は、Ethereum のインデックスを作成するサブグラフの構築と非常によく似ています。 サブグラフの定義には 3 つの側面があります: -**subgraph.yaml:**: サブグラフのマニフェストで、対象となるデータソースとその処理方法を定義します。NEAR は新しい`種類`のデータソースです。 +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:**: サブグラフのためにどのようなデータが保存されているか、そして GraphQL を介してどのようにクエリを行うかを定義するスキーマファイル。NEAR サブグラフの要件は、[既存のドキュメント](/developing/creating-a-subgraph/#the-graphql-schema)でカバーされています。 +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. @@ -48,7 +48,7 @@ $ グラフ ビルド # AssemblyScript ファイルから Web アセンブリを ### サブグラフマニフェストの定義 -サブグラフ マニフェスト (`subgraph.yaml`) は、サブグラフのデータ ソース、対象のトリガー、およびそれらのトリガーに応答して実行する必要がある関数を識別します。 NEAR サブグラフのサブグラフ マニフェストの例については、以下を参照してください。 +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR サブグラフは、新しい`種類`のデータソース(`near`) を導入する +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- NEARのデータソースは、オプションの `source.accounts` フィールドを導入し、オプションのサフィックスとプレフィックスを含んでいます。少なくともプレフィックスまたはサフィックスを指定する必要があり、それぞれ値のリストで始まるまたは終わる任意のアカウントにマッチします。以下の例では、以下のようにマッチします。`[app|good].*[morning.near|morning.testnet]`. リストだけが必要な場合は、他のフィールドを省略することができます。 +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. 
At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml accounts: @@ -87,16 +87,16 @@ accounts: NEAR データソースは 2 種類のハンドラーをサポートしています: -- `blockHandlers`: 新しい NEAR ブロックごとに実行され、`source.account` は必要ありません。 +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). ### スキーマ定義 -スキーマの定義は、結果として得られるサブグラフ・データベースの構造と、エンティティ間の関係を記述する。これは、元のデータソースに依存しません。スキーマ定義の詳細は、[ こちら](/developing/creating-a-subgraph/#the-graphql-schema)にあります。 +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript マッピング -イベントを処理するためのハンドラは[AssemblyScript](https://www.assemblyscript.org/)で書かれています。 +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -160,10 +160,10 @@ class ReceiptWithOutcome { } ``` -これらのタイプは、ブロックハンドラとレシートハンドラに渡されます: +These types are passed to block & receipt handlers: -- ブロックハンドラーは、`Block`を受け取ります -- レシートハンドラーは`ReceiptWithOutcome`を受け取ります +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. @@ -171,7 +171,7 @@ This includes a new JSON parsing function - logs on NEAR are frequently emitted ## NEAR サブグラフの展開 -サブグラフを作成したら、それをインデックス作成のためにグラフ ノードにデプロイします。 NEAR サブグラフは、`>=v0.26.x` 以降のグラフ ノードにデプロイできます (このバージョンはまだタグ付けされていないか、リリースされていません)。 +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: @@ -182,7 +182,7 @@ More information on creating and deploying subgraphs on Subgraph Studio can be f As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". 
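A typical Subgraph Studio session, sketched with placeholder values (`<DEPLOY_KEY>` and `<SUBGRAPH_SLUG>` come from your own Studio dashboard), might be:

```sh
$ graph auth <DEPLOY_KEY> # authenticate the CLI against Subgraph Studio
$ graph deploy <SUBGRAPH_SLUG> # deploy the built subgraph, as in the commands below
```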
-サブグラフが作成されたら、CLI コマンドの`graph deploy`を使ってサブグラフをデプロイすることができます。 +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,17 +228,17 @@ NEAR のインデックスを作成するグラフノードの運用には、以 ## NEAR サブグラフへのクエリ -NEAR サブグラフの GraphQL エンドポイントは、既存の API インターフェイスを用いて、スキーマ定義によって決定されます。詳細は、[GraphQL API documentation](/subgraphs/querying/graphql-api/) をご覧ください。 +The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## サブグラフの例 Here are some example subgraphs for reference: -[NEARブロック](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[NEAR 領収書](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## よくある質問 +## FAQ ### ベータ版はどのように機能しますか? @@ -254,7 +254,7 @@ NEAR サポートはベータ版です。統合の改善を続ける中で、API ### 領収書ハンドラーは、アカウントとそのサブアカウントに対してトリガーされますか? -もし`account`が指定された場合、それは正確なアカウント名にのみマッチします。`accounts` フィールドを指定して、`suffixes` と `prefixes` でアカウントとサブアカウントにマッチさせることが可能で、例えば、次のようにするとすべての `mintbase1.near` サブアカウントにマッチすることになります。 +If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: ```yaml accounts: @@ -276,8 +276,8 @@ accounts: ### 私の質問に対する回答がありません。NEAR サブグラフの作成に関するヘルプはどこで入手できますか? -もし、サブグラフ開発に関する一般的な質問であれば、[開発者ドキュメント](/subgraphs/quick-start/)に多くの情報があります。それ以外の場合は、[The Graph Protocol Discord](https://discord.gg/graphprotocol)に参加し、#nearチャンネルで質問するか、near@thegraph.comまでお寄せください。 +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## 参考文献 -- [NEAR 開発者ドキュメント](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From 2d152fb3b2d9e18e6faa9972d8d6efc78e459a58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:48 -0500 Subject: [PATCH 0701/1534] New translations near.mdx (Korean) --- website/src/pages/ko/subgraphs/cookbook/near.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/near.mdx b/website/src/pages/ko/subgraphs/cookbook/near.mdx index 23a936406e7b..6060eb27e761 100644 --- a/website/src/pages/ko/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/near.mdx @@ -12,7 +12,7 @@ This guide is an introduction to building subgraphs indexing smart contracts on The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. 
[Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account From d4c7e75d4289e1cf2fa46736d46644074fdfc46d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:49 -0500 Subject: [PATCH 0702/1534] New translations near.mdx (Dutch) --- website/src/pages/nl/subgraphs/cookbook/near.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/near.mdx b/website/src/pages/nl/subgraphs/cookbook/near.mdx index 4a4ac06f2e86..75f966e7a597 100644 --- a/website/src/pages/nl/subgraphs/cookbook/near.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/near.mdx @@ -12,7 +12,7 @@ This guide is an introduction to building subgraphs indexing smart contracts on The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account From 5e5c1bb5a7866a5ee9342bbe45a3cb241030ff8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:50 -0500 Subject: [PATCH 0703/1534] New translations near.mdx (Polish) --- website/src/pages/pl/subgraphs/cookbook/near.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/near.mdx b/website/src/pages/pl/subgraphs/cookbook/near.mdx index 23a936406e7b..6060eb27e761 100644 --- a/website/src/pages/pl/subgraphs/cookbook/near.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/near.mdx @@ -12,7 +12,7 @@ This guide is an introduction to building subgraphs indexing smart contracts on The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account From 9b0ff2db4a601fb6500b033151dcb9ebcdab7bf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:51 -0500 Subject: [PATCH 0704/1534] New translations near.mdx (Portuguese) --- .../src/pages/pt/subgraphs/cookbook/near.mdx | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/near.mdx b/website/src/pages/pt/subgraphs/cookbook/near.mdx index e2fa982b8aac..58143e87a809 100644 --- a/website/src/pages/pt/subgraphs/cookbook/near.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/near.mdx @@ -2,32 +2,32 @@ title: Construção de Subgraphs na NEAR --- -Este guia é uma introdução à construção de subgraphs a indexar contratos inteligentes na [blockchain NEAR](https://docs.near.org/). +Este guia é uma introdução à construção de subgraphs para indexar contratos inteligentes na blockchain [NEAR](https://docs.near.org/). ## O que é NEAR? -O [NEAR](https://near.org/) é uma plataforma de contratos inteligentes para a construção de aplicativos descentralizados. Visite a [documentação oficial](https://docs.near.org/concepts/basics/protocol) para mais informações. +[NEAR](https://near.org/) é uma plataforma de contrato inteligente para construir aplicativos descentralizados. Visite a [documentação oficial](https://docs.near.org/concepts/basics/protocol) para mais informações. ## O que são subgraphs na NEAR? -Os programadores do The Graph recebem ferramentas para processar eventos em blockchain e disponibilizar facilmente os dados resultantes através de uma API GraphQL, conhecida individualmente como um subgraph. O [Graph Node](https://github.com/graphprotocol/graph-node) agora é capaz de processar eventos no NEAR, significando que programadores na NEAR agora podem construir subgraphs para indexar seus contratos inteligentes. +The Graph fornece aos programadores ferramentas para processar eventos de blockchain e tornar os dados resultantes facilmente disponíveis por meio de uma API GraphQL, conhecida individualmente como subgraph. O [Graph Node](https://github.com/graphprotocol/graph-node) agora é capaz de processar eventos NEAR, o que significa que programadores da NEAR agora podem criar subgraphs para indexar seus contratos inteligentes. -Subgraphs são baseados em eventos; quer dizer, eles esperam e então processam eventos on-chain. Atualmente há dois tipos de handlers que funcionam para subgraphs no NEAR: +Subgraphs são baseados em eventos; ou seja, eles esperam e então processam eventos on-chain. Atualmente há dois tipos de handlers que funcionam para subgraphs na NEAR: - Handlers de blocos: executados em todos os blocos novos - Handlers de recibos: Executados sempre que uma mensagem é executada numa conta especificada -[Da documentação do NEAR](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): +[Da documentação da NEAR](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): > Um Recibo é o único objeto acionável no sistema. Quando falamos de "processar uma transação" na plataforma NEAR, em algum ponto isto eventualmente significa "aplicar recibos". ## Construindo um Subgraph no NEAR -`@graphprotocol/graph-cli` é uma ferramenta de linha de comando para a construção e lançamento de subgraphs. 
+`@graphprotocol/graph-cli` é uma ferramenta de linha de comando para a construção e implantação de subgraphs. `@graphprotocol/graph-ts` é uma biblioteca de tipos específicos a subgraphs. -O desenvolvimento de subgraphs no NEAR exige o `graph-cli` acima da versão `0.23.0`, e o `graph-ts` acima da versão `0.23.0`. +A programação de subgraphs no NEAR exige o `graph-cli` acima da versão `0.23.0`, e o `graph-ts` acima da versão `0.23.0`. > Construir um subgraph NEAR é um processo muito parecido com a construção de um subgraph que indexa o Ethereum. @@ -35,11 +35,11 @@ Há três aspectos de definição de subgraph: **subgraph.yaml:** o manifest do subgraph, que define as fontes de dados de interesse e como elas devem ser processadas. A NEAR é uma nova espécie (`kind`) de fonte de dados. -**schema.graphql:** um arquivo schema que define quais dados são armazenados para o seu subgraph, e como consultá-los via GraphQL. Os requerimentos para subgraphs no NEAR são cobertos pela [documentação existente](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** um arquivo de schema que define quais dados são armazenados para o seu subgraph e como consultá-los via GraphQL. Os requisitos para subgraphs NEAR são cobertos pela [documentação existente](/developing/creating-a-subgraph/#the-graphql-schema). -**AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. +**Mapeamentos de AssemblyScript:** [Código AssemblyScript](/subgraphs/developing/creating/graph-ts/api/) que traduz dos dados do evento para as entidades definidas no seu esquema. O apoio à NEAR introduz tipos de dados específicos da NEAR e novas funções de análise JSON. -Existem dois comandos importantes durante o desenvolvimento de um subgraph: +Durante o desenvolvimento de um subgraph, existem dois comandos importantes: ```bash $ graph codegen # gera tipos do arquivo de schema identificado no manifest @@ -48,7 +48,7 @@ $ graph build # gera Web Assembly dos arquivos AssemblyScript, e prepara todos o ### Definição de Manifest de Subgraph -O manifest do subgraph (`subgraph.yaml`) identifica as fontes de dados ao subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a tais gatilhos. Veja abaixo um exemplo de manifest de subgraph para um subgraph na NEAR: +O manifest do subgraph (`subgraph.yaml`) identifica as fontes de dados para o subgraph, os gatilhos de interesse, e as funções que devem ser executadas em resposta a tais gatilhos. Veja abaixo um exemplo de manifest para um subgraph na NEAR: ```yaml specVersion: 0.0.2 @@ -71,9 +71,9 @@ dataSources: ``` - Subgraphs na NEAR introduzem um novo tipo (`kind`) de fonte de dados (`near`) -- A rede (`network`) deve corresponder a uma rede no Graph Node hospedeiro. No Subgraph Studio, a mainnet do NEAR é `near-mainnet`, e a testnet é `near-testnet` -- Fontes de dados no NEAR introduzem um campo `source.account` opcional: um ID legível a humanos que corresponde a uma [conta no NEAR](https://docs.near.org/concepts/protocol/account-model). Isto pode ser uma conta ou subconta. -- Fontes de dados na NEAR introduzem um campo `source.accounts` opcional, que contém sufixos e prefixos opcionais. No mínimo, deve ser especificado o prefixo ou o sufixo, já que eles procurarão qualquer conta que comece ou termine com a lista respectiva de valores. 
Combinaria o exemplo abaixo: `[app|good].*[morning.near|morning.testnet]`. Se só for necessária uma lista de prefixos ou sufixos, o outro campo pode ser omitido. +- O `network` deve corresponder a uma rede no Graph Node hóspede. No Subgraph Studio, a mainnet da NEAR é `near-mainnet`, e a testnet da NEAR é `near-testnet` +- Fontes de dados na NEAR introduzem um campo `source.account` opcional: uma ID legível a humanos que corresponde a uma [conta na NEAR](https://docs.near.org/concepts/protocol/account-model). Isto pode ser uma conta ou subconta. +- As fontes de dados da NEAR introduzem um campo alternativo `source.accounts` opcional, que contém sufixos e prefixos opcionais. Pelo menos prefix ou sufixo deve ser especificado, eles corresponderão a qualquer conta que comece ou termine com a lista de valores, respectivamente. O exemplo abaixo corresponderia a: `[app|good].*[morning.near|morning.testnet]`. Se apenas uma lista de prefixos ou sufixos for necessária, o outro campo pode ser omitido. ```yaml accounts: @@ -92,13 +92,13 @@ As fontes de dados na NEAR apoiam duas categorias de handlers: ### Definição de Schema -A definição do schema descreve a estrutura do banco de dados do subgraph resultante e os relacionamentos entre entidades. Isto é agnóstico da fonte de dados original. Mais detalhes na definição do schema de subgraph [aqui](/developing/creating-a-subgraph/#the-graphql-schema). +A definição de Schema descreve a estrutura do banco de dados resultado do subgraph, e os relacionamentos entre entidades. Isto é agnóstico da fonte de dados original. Para mais detalhes na definição de schema de subgraph, [clique aqui](/developing/creating-a-subgraph/#the-graphql-schema). ### Mapeamentos em AssemblyScript -Os handlers para o processamento de eventos são escritos em [AssemblyScript](https://www.assemblyscript.org/). +Os handlers para processamento de eventos estão escritos em [AssemblyScript](https://www.assemblyscript.org/). -NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +A indexação da NEAR introduz tipos de dados específicos desse ecossistema à [API do AssemblyScript](/subgraphs/developing/creating/graph-ts/api/). ```typescript @@ -165,28 +165,28 @@ Estes tipos são repassados para handlers de blocos e recibos: - Handlers de blocos receberão um `Block` - Handlers de recibos receberão um `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Caso contrário, o resto da [API do AssemblyScript](/subgraphs/developing/creating/graph-ts/api/) está à disposição dos programadores de subgraph na NEAR, durante a execução dos mapeamentos. -This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. +Isto inclui uma nova função de análise em JSON: logs na NEAR são frequentemente emitidos como JSONs em string. A nova função json.fromString(...) está disponível como parte da [API JSON](/subgraphs/developing/creating/graph-ts/api/#json-api) para que programadores processem estes logs com mais facilidade. ## Lançando um Subgraph na NEAR -Quando tiver um subgraph pronto, chegará a hora de lançá-lo no Graph Node para indexar. 
Subgraphs na NEAR podem ser lançados em qualquer Graph Node `>=v0.26.x` (esta versão ainda não foi marcada ou lançada). +Quando tiver um subgraph pronto, chegará a hora de implantá-lo no Graph Node para indexar. Subgraphs na NEAR podem ser implantados em qualquer Graph Node `>=v0.26.x` (esta versão ainda não foi marcada ou liberada). O Subgraph Studio e o Indexador de atualização na Graph Network apoiam atualmente a indexação da mainnet e da testnet do NEAR em beta, com os seguintes nomes de rede: - `near-mainnet` - `near-testnet` -Saiba mais sobre a criação e lançamento de subgraphs no Subgraph Studio [aqui](/deploying/deploying-a-subgraph-to-studio/). +Para mais informações sobre criar e implantar subgraphs no Subgraph Studio, clique [aqui](/deploying/deploying-a-subgraph-to-studio/). Para começo de conversa, o primeiro passo consiste em "criar" o seu subgraph - isto só precisa ser feito uma vez. No Subgraph Studio, isto pode ser feito do [seu Painel](https://thegraph.com/studio/): "Criar um subgraph". -Quando o seu subgraph estiver pronto, lance o seu subgraph com o comando de CLI `graph deploy`: +Quando o seu subgraph estiver pronto, implante o seu subgraph com o comando de CLI `graph deploy`: ```sh $ graph create --node # cria um subgraph num Graph Node local (no Subgraph Studio, isto é feito via a interface) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # envia os arquivos de construção a um endpoint IPFS especificado, e depois edita o subgraph a um Graph Node especificado com base no hash IPFS do manifest +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # sobe os arquivos do build a um ponto final IPFS especificado, e implanta o subgraph num Graph Node com base no hash IPFS do manifest ``` A configuração do nódulo dependerá de onde o subgraph será lançado. @@ -228,17 +228,17 @@ Em breve, falaremos mais sobre como executar os componentes acima. ## Como Consultar um Subgraph na NEAR -O endpoint da GraphQL para subgraphs na NEAR é determinado pela definição do schema, com a interface existente da API. Mais informações na [documentação da API GraphQL](/subgraphs/querying/graphql-api/). +O ponto final do GraphQL para subgraphs na NEAR é determinado pela definição do schema, com a interface existente da API. Visite a [documentação da API da GraphQL](/subgraphs/querying/graphql-api/) para mais informações. ## Exemplos de Subgraphs Aqui estão alguns exemplos de subgraphs para referência: -[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[Blocos da NEAR](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[Recibos da NEAR](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## Perguntas Frequentes +## FAQ ### Como o beta funciona? @@ -276,8 +276,8 @@ No momento, não há apoio à funcionalidade de pendências para subgraphs na NE ### A minha pergunta não foi respondida. Onde posso conseguir mais ajuda sobre construir subgraphs na NEAR? -Se esta for uma pergunta geral sobre desenvolvimento de subgraphs, há mais informações no resto da [documentação para programadores](/subgraphs/quick-start/). Caso contrário, entre no [Discord do Graph Protocol](https://discord.gg/graphprotocol) e pergunte no canal #near, ou mande a sua pergunta para near@thegraph.com. 
+Se esta for uma pergunta geral sobre programação de subgraphs, há mais informações no resto da [documentação para programadores](/subgraphs/quick-start/). Caso contrário, entre no [Discord do Graph Protocol](https://discord.gg/graphprotocol) e pergunte no canal #near, ou mande a sua pergunta para near@thegraph.com. ## Referências -- [Documentação para programadores da NEAR](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [Documentação para programadores na NEAR](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From 277feed825dc700e8e18df060aeecc1972db88a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:53 -0500 Subject: [PATCH 0705/1534] New translations near.mdx (Russian) --- .../src/pages/ru/subgraphs/cookbook/near.mdx | 136 +++++++++--------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/near.mdx b/website/src/pages/ru/subgraphs/cookbook/near.mdx index d815fb9cd7fe..ac22a9f8c015 100644 --- a/website/src/pages/ru/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/near.mdx @@ -2,42 +2,42 @@ title: Создание субграфов на NEAR --- -Это руководство представляет собой введение в построение подграфов, индексирующих смарт-контракты на [блокчейне NEAR](https://docs.near.org/). +Это руководство является введением в создание субграфов для индексирования смарт-контрактов на [блокчейне NEAR](https://docs.near.org/). ## Что такое NEAR? -[NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. +[NEAR](https://near.org/) — это платформа для смарт-контрактов, предназначенная для создания децентрализованных приложений. Для получения дополнительной информации ознакомьтесь с [официальной документацией](https://docs.near.org/concepts/basics/protocol). -## Что такое NEAR подграфы? +## Что такое NEAR субграфы? -The Graph предоставляет разработчикам инструменты для обработки событий блокчейна и делает результирующие данные легко доступными через GraphQL API, известный индивидуально как субграф. [Graph Node](https://github.com/graphprotocol/graph-node) теперь способен обрабатывать события NEAR, что означает, что разработчики NEAR теперь могут создавать субграфы для индексации своих смарт-контрактов. +The Graph предоставляет разработчикам инструменты для обработки событий блокчейна и упрощает доступ к полученным данным через API GraphQL, известный также как субграф. [Graph Node](https://github.com/graphprotocol/graph-node) теперь способен обрабатывать события NEAR, что позволяет разработчикам NEAR создавать субграфы для индексирования своих смарт-контрактов. -Субграфы основаны на событиях, что означает, что они отслеживают и затем обрабатывают события в сети. В настоящее время для подграфов NEAR поддерживаются два типа обработчиков: +Субграфы основаны на событиях, что означает, что они отслеживают и обрабатывают события в блокчейне. 
В настоящее время для субграфов NEAR поддерживаются два типа обработчиков: - Обработчики блоков: они запускаются для каждого нового блока -- Обработчики квитанций: запускаются каждый раз, когда сообщение выполняется в указанной учетной записи +- Обработчики поступлений: запускаются каждый раз, когда сообщение выполняется в указанной учетной записи -[From the NEAR documentation](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): +[Из документации NEAR](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): -> Квитанция - это единственный объект, к которому можно применить действие в системе. Когда мы говорим об "обработке транзакции" на платформе NEAR, это в конечном итоге означает "применение квитанций" в какой-то момент. +> Поступление - это единственный объект, к которому можно применить действие в системе. Когда мы говорим об "обработке транзакции" на платформе NEAR, это в конечном итоге означает "применение поступлений" в какой-то момент. -## Создание NEAR подграфа +## Создание NEAR субграфа -`@graphprotocol/graph-cli` - это инструмент командной строки для построения и развертывания субграфов. +`@graphprotocol/graph-cli` — это инструмент командной строки для создания и развертывания субграфов. -`@graphprotocol/graph-ts` - это библиотека типов, специфичных для субграфов. +`@graphprotocol/graph-ts` — это библиотека типов, специфичных для субграфов. -Для разработки NEAR субграфа требуется `graph-cli` выше версии `0.23.0` и `graph-ts` выше версии `0.23.0`. +Для разработки субграфов на платформе NEAR требуется `graph-cli` версии выше `0.23.0` и `graph-ts` версии выше `0.23.0`. -> Построение NEAR сабграфа очень похоже на построение сабграфа, индексирующего Ethereum. +> Построение NEAR субграфа очень похоже на построение субграфа, индексирующего Ethereum. -Существует три аспекта определения подграфа: +Существует три аспекта определения субграфа: -**subgraph.yaml:** манифест подграфа, определяющий источники данных, представляющие интерес, и как они должны быть обработаны. NEAR - это новый `вид` источника данных. +**subgraph.yaml:** манифест субграфа, определяющий источники данных и способы их обработки. NEAR является новым `kind` (типом) источника данных. -**schema.graphql:** файл схемы, который определяет, какие данные хранятся для вашего подграфа и как запрашивать их через GraphQL. Требования к подграфам NEAR рассматриваются в [существующей документации](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** файл схемы, который определяет, какие данные хранятся в Вашем субграфе и как к ним можно обращаться через GraphQL. Требования для субграфов NEAR описаны в [существующей документации](/developing/creating-a-subgraph/#the-graphql-schema). -**AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. +**Мэппинги на AssemblyScript:** [код на AssemblyScript](/subgraphs/developing/creating/graph-ts/api/), который преобразует данные событий в элементы, определенные в Вашей схеме. Поддержка NEAR вводит специфичные для NEAR типы данных и новую функциональность для парсинга JSON. 
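To make the mapping layer concrete before moving on to the build commands, here is a minimal receipt-handler sketch in AssemblyScript. The `Greeting` entity and the `../generated/schema` import path are illustrative assumptions that depend on your own `schema.graphql` and `graph codegen` output; they are not defined by the patches above.

```typescript
import { near } from '@graphprotocol/graph-ts'
// Hypothetical entity generated by `graph codegen` from an assumed schema.graphql
import { Greeting } from '../generated/schema'

export function handleReceipt(receipt: near.ReceiptWithOutcome): void {
  // Receipt ids are unique, so they make convenient entity ids
  const entity = new Greeting(receipt.receipt.id.toBase58())
  entity.signer = receipt.receipt.signerId // the account that signed the original transaction
  entity.save()
}
```

A handler like this is the function named under `receiptHandlers` in the manifest, as in the YAML examples elsewhere in these patches.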
Во время разработки субграфа есть две ключевые команды:

```bash
$ graph codegen # генерирует типы из файла схемы, указанного в манифесте
$ graph build # генерирует Web Assembly из файлов AssemblyScript и подготавливает все файлы субграфа в папке /build
```

### Определение манифеста субграфа

-Манифест подграфа (`subgraph.yaml`) определяет источники данных для подграфа, интересующие триггеры и функции, которые должны быть запущены в ответ на эти триггеры. Ниже приведен пример манифеста подграфа для подграфа NEAR:
+Манифест субграфа (`subgraph.yaml`) определяет источники данных для субграфа, интересующие триггеры и функции, которые должны быть выполнены в ответ на эти триггеры. Пример манифеста субграфа для NEAR представлен ниже:

```yaml
specVersion: 0.0.2
schema:
  file: ./src/schema.graphql # link to the schema file
dataSources:
  - kind: near
    network: near-mainnet
    source:
      account: app.good-morning.near # This data source will monitor this account
      startBlock: 10662188 # Required for NEAR
    mapping:
      apiVersion: 0.0.5
      language: wasm/assemblyscript
      blockHandlers:
        - handler: handleNewBlock # the function name in the mapping file
      receiptHandlers:
        - handler: handleReceipt # the function name in the mapping file
      file: ./src/mapping.ts # link to the file with the Assemblyscript mappings
```

-- NEAR подграфы вводят новый ` вид` источника данных (`near`)
-- The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet`
-- NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account.
-- Источниками данных NEAR вводят альтернативное необязательное поле `source.accounts`, которое содержит необязательные суффиксы и префиксы. Необходимо указать по крайней мере префикс или суффикс, они будут соответствовать любой учетной записи, начинающейся или заканчивающейся списком значений соответственно. Приведенный ниже пример соответствовал бы: `[app|good].*[morning.near|morning.testnet]`. Если необходим только список префиксов или суффиксов, другое поле можно опустить.
+- Субграфы NEAR вводят новый тип источника данных (`near`)
+- `network` должен соответствовать сети на хостинговой Graph Node. В Subgraph Studio майннет NEAR называется `near-mainnet`, а тестнет NEAR — `near-testnet`
+- Источники данных NEAR содержат необязательное поле `source.account`, которое представляет собой удобочитаемый идентификатор, соответствующий [учетной записи NEAR](https://docs.near.org/concepts/protocol/account-model). Это может быть как основной аккаунт, так и суб-аккаунт.
+- Источники данных NEAR вводят альтернативное необязательное поле `source.accounts`, которое содержит необязательные префиксы и суффиксы. Необходимо указать хотя бы один префикс или суффикс, они будут соответствовать любому аккаунту, начинающемуся или заканчивающемуся на значения из списка соответственно. Приведенный ниже пример будет совпадать с: `[app|good].*[morning.near|morning.testnet]`. Если необходим только список префиксов или суффиксов, другое поле можно опустить.

```yaml
accounts:
  prefixes:
    - app
    - good
  suffixes:
    - morning.near
    - morning.testnet
```

Источники данных NEAR поддерживают два типа обработчиков:

-- `blockHandlers` запускаются при каждом новом ближайшем блоке. Не требуется `source.account`.
-- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources).
+- `blockHandlers`: выполняется для каждого нового блока NEAR. `source.account` не требуется.
+- `receiptHandlers`: выполняется при каждом получении, где `source.account` источника данных является получателем.
Обратите внимание, что обрабатываются только точные совпадения ([субаккаунты](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) должны быть добавлены как независимые источники данных). ### Определение схемы -Определение схемы описывает структуру результирующей базы данных подграфов и взаимосвязи между объектами. Это не зависит от исходного источника данных. Более подробная информация об определении схемы подграфа [ приведена здесь](/developing/creating-a-subgraph/#the-graphql-schema). +Определение схемы описывает структуру итоговой базы данных субграфа и отношения между объектами. Это не зависит от исходного источника данных. Более подробную информацию об определении схемы субграфа можно найти [здесь](/developing/creating-a-subgraph/#the-graphql-schema). -### Сопоставления AssemblyScript +### Мэппинги AssemblyScript -Обработчики событий написаны на языке [AssemblyScript](https://www.assemblyscript.org/). +Обработчики для обработки событий написаны на [AssemblyScript](https://www.assemblyscript.org/). -NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +Индексирование NEAR вводит специфичные для NEAR типы данных в [API AssemblyScript](/subgraphs/developing/creating/graph-ts/api/). ```typescript @@ -125,7 +125,7 @@ class ActionReceipt { class BlockHeader { height: u64, - prevHeight: u64,// Always zero when version < V3 + prevHeight: u64,// Всегда 0 для версии < V3 epochId: Bytes, nextEpochId: Bytes, chunksIncluded: u64, @@ -160,36 +160,36 @@ class ReceiptWithOutcome { } ``` -Эти типы передаются обработчикам блоков и квитанций: +Эти типы передаются в обработчики блоков и поступлений: - Обработчики блоков получат `Block` -- Обработчики квитанций получат `ReceiptWithOutcome` +- Обработчики поступлений получат `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +В остальном, весь [API для AssemblyScript](/subgraphs/developing/creating/graph-ts/api/) доступен разработчикам субграфов для NEAR во время выполнения мэппинга. -This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. +Это включает в себя новую функцию для парсинга JSON — логи в NEAR часто выводятся как строковые JSON. Новая функция `json.fromString(...)` доступна в рамках [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api), что позволяет разработчикам легко обрабатывать эти логи. ## Развертывание NEAR субграфа -Как только у вас будет построенный субграф, пришло время развернуть его в Graph Node для индексации. NEAR субграфы могут быть развернуты на любой ноде Graph `>=v0.26.x` (эта версия еще не была помечена & выпущена). +Как только Ваш субграф будет создан, наступает время развернуть его на Graph Node для индексирования. Субграфы NEAR можно развернуть на любом Graph Node версии `>=v0.26.x` (эта версия еще не отмечена и не выпущена). 
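Once deployed, sync progress can also be checked programmatically with the same `_meta` GraphQL query shown later in this section. A hedged TypeScript sketch (Node 18+ with built-in `fetch`; the endpoint URL is a placeholder for your own deployment, not something defined by these patches):

```typescript
const ENDPOINT = 'http://localhost:8000/subgraphs/name/my-near-subgraph' // placeholder URL

async function latestIndexedBlock(): Promise<number> {
  // POST the `_meta` query to the subgraph's GraphQL endpoint
  const res = await fetch(ENDPOINT, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query: '{ _meta { block { number } } }' }),
  })
  const body = await res.json()
  return body.data._meta.block.number
}

latestIndexedBlock().then((n) => console.log(`indexed up to block ${n}`))
```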
-Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: +Subgraph Studio и Индексатор обновлений в The Graph Network в настоящее время поддерживают индексирование основной и тестовой сети NEAR в бета-версии со следующими именами сетей: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +Дополнительную информацию о создании и развертывании субграфов в Subgraph Studio можно найти [здесь](/deploying/deploying-a-subgraph-to-studio/). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +В качестве краткого примера — первый шаг заключается в "создании" Вашего субграфа — это нужно сделать только один раз. В Subgraph Studio это можно сделать на Вашей [панели управления](https://thegraph.com/studio/), выбрав опцию "Создать субграф". -Как только ваш подграф создан, вы можете развернуть его с помощью команды `graph deploy` CLI: +После того как субграф создан, его можно развернуть с помощью команды `graph deploy` в CLI: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # создает субграф на локальной Graph Node (в Subgraph Studio это делается через UI) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # загружает файлы сборки на указанную конечную точку IPFS, а затем разворачивает субграф на указанной Graph Node на основе хеша манифеста IPFS ``` -Конфигурация ноды будет зависеть от того, где развертывается подграф. +Конфигурация ноды будет зависеть от того, где развертывается субграф. ### Subgraph Studio @@ -198,13 +198,13 @@ graph auth graph deploy ``` -### Local Graph Node (based on default configuration) +### Локальная Graph Node (на основе конфигурации по умолчанию) ```sh graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Как только ваш подграф будет развернут, он будет проиндексирован Graph Node. Вы можете проверить его прогресс, сделав запрос к самому субграфу: +Как только Ваш субграф будет развернут, он будет проиндексирован Graph Node. Вы можете проверить его прогресс, сделав запрос к самому субграфу: ```graphql { @@ -216,9 +216,9 @@ graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 Date: Fri, 14 Feb 2025 12:56:53 -0500 Subject: [PATCH 0706/1534] New translations near.mdx (Swedish) --- .../src/pages/sv/subgraphs/cookbook/near.mdx | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/near.mdx b/website/src/pages/sv/subgraphs/cookbook/near.mdx index f2bf41799d74..833a4b7c997d 100644 --- a/website/src/pages/sv/subgraphs/cookbook/near.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/near.mdx @@ -2,7 +2,7 @@ title: Bygger subgrafer på NEAR --- -Den här guiden är en introduktion till att bygga subgrafer som indexerar smarta kontrakt på [NEAR blockchain](https://docs.near.org/). +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). 
## Vad är NEAR? @@ -10,9 +10,9 @@ Den här guiden är en introduktion till att bygga subgrafer som indexerar smart ## Vad är NEAR subgrafer? -The Graph ger utvecklare verktyg för att bearbeta blockchain-händelser och göra den resulterande informationen lätt tillgänglig via ett GraphQL API, individuellt känt som en subgraf. [Graph Node](https://github.com/graphprotocol/graph-node) kan nu bearbeta NEAR-händelser, vilket innebär att NEAR-utvecklare nu kan bygga subgrafer för att indexera sina smarta kontrakt. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgrafer är händelsebaserade, vilket innebär att de lyssnar efter och sedan bearbetar händelser i kedjan. Det finns för närvarande två typer av hanterare som stöds för NEAR subgrafer: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Blockhanterare: dessa körs på varje nytt block - Kvittohanterare: körs varje gång ett meddelande körs på ett angivet konto @@ -23,32 +23,32 @@ Subgrafer är händelsebaserade, vilket innebär att de lyssnar efter och sedan ## Att bygga en NEAR Subgraf -`@graphprotocol/graph-cli` är ett kommandoradsverktyg för att bygga och distribuera subgrafer. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`@graphprotocol/graph-ts` är ett bibliotek med subgrafspecifika typer. +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -NEAR subgrafutveckling kräver `graph-cli` ovan version `0.23.0` och `graph-ts` ovan version `0.23.0`. +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. > Att bygga en NEAR subgraf är mycket lik att bygga en subgraf som indexerar Ethereum. Det finns tre aspekter av subgraf definition: -**subgraph.yaml:** undergraf manifestet, som definierar datakällorna av intresse och hur de ska behandlas. NEAR är en ny `typ` av datakälla. +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** en schema fil som definierar vilken data som lagras för din subgraf, och hur man frågar den via GraphQL. Kraven för NEAR undergrafer täcks av [den befintliga dokumentationen](/developing/creating-a-subgraph/#the-graphql-schema). +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. 
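As a sketch of that JSON parsing functionality: NEAR contracts often emit logs as stringified JSON, which `json.fromString(...)` can decode inside a handler. The `event` key below is an assumed log shape, not something the patches above define.

```typescript
import { near, json, JSONValueKind, log } from '@graphprotocol/graph-ts'

export function handleReceipt(receipt: near.ReceiptWithOutcome): void {
  const logs = receipt.outcome.logs
  for (let i = 0; i < logs.length; i++) {
    // json.try_fromString is the non-aborting variant for untrusted input
    const parsed = json.fromString(logs[i])
    if (parsed.kind == JSONValueKind.OBJECT) {
      const event = parsed.toObject().get('event') // assumed key
      if (event != null) {
        log.info('saw event: {}', [event.toString()])
      }
    }
  }
}
```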
Under subgrafutveckling finns det två nyckelkommandon: ```bash -$ graph codegen # genererar typer från schema filen som identifieras i manifestet -$ graph build # genererar Web Assembly från AssemblyScript filerna och förbereder alla subgraffiler i en /build-mapp +$ graph codegen # generates types from the schema file identified in the manifest +$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder ``` ### Definition av subgraf manifestet -Subgrafmanifestet (`subgraph.yaml`) identifierar datakällorna för subgrafen, utlösare av intresse och funktionerna som ska köras som svar på dessa utlösare. Se nedan för ett exempel på subgraf manifest för en NEAR subgraf: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR undergrafer introducerar en ny `typ` av datakälla (`near`) +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- NEAR datakällor introducerar ett alternativt valfritt `source.accounts`-fält, som innehåller valfria suffix och prefix. Minst prefix eller suffix måste anges, de kommer att matcha alla konton som börjar eller slutar med värdelistan. Exemplet nedan skulle matcha: `[app|bra].*[morning.near|morning.testnet]`. Om endast en lista med prefix eller suffix är nödvändig kan det andra fältet utelämnas. +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml accounts: @@ -87,16 +87,16 @@ accounts: NEAR datakällor stöder två typer av hanterare: -- `blockHandlers`: kör på varje nytt NEAR-block. Inget `source.account` krävs. +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). ### Schema Definition -Schemadefinition beskriver strukturen för den resulterande subgraf databasen och relationerna mellan enheter. Detta är agnostiskt för den ursprungliga datakällan. Det finns mer information om definition av subgraf schema [här](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript mappningar -Hanterarna för bearbetning av händelser är skrivna i [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -160,10 +160,10 @@ class ReceiptWithOutcome { } ``` -Dessa typer skickas till block & kvittohanterare: +These types are passed to block & receipt handlers: -- Blockhanterare kommer att få ett `Block` -- Kvittohanterare kommer att få ett `ReceiptWithOutcome` +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. @@ -171,7 +171,7 @@ This includes a new JSON parsing function - logs on NEAR are frequently emitted ## Utplacera en NEAR Subgraf -När du har en byggd subgraf är det dags att distribuera den till Graph Node för indexering. NEAR undergrafer kan distribueras till alla Graph Node `>=v0.26.x` (den här versionen har ännu inte taggats & släppts). +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: @@ -182,7 +182,7 @@ More information on creating and deploying subgraphs on Subgraph Studio can be f As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -När din subgraf har skapats kan du distribuera din subgraf genom att använda `graph deploy` CLI kommandot: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,15 +228,15 @@ Vi kommer snart att ge mer information om hur du kör ovanstående komponenter. ## Fråga efter en NEAR subgraf -GraphQL slutpunkten för NEAR undergrafer bestäms av schemadefinitionen, med det befintliga API gränssnittet. Besök [GraphQL API-dokumentationen](/subgraphs/querying/graphql-api/) för mer information. +The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## Exempel på subgrafer -Här är några exempel på subgrafer som referens: +Here are some example subgraphs for reference: -[NEAR Block](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[NEAR kvitton](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) ## FAQ @@ -254,7 +254,7 @@ För närvarande stöds endast blockerings- och kvittoutlösare. 
Vi undersöker utlösare för funktionsanrop.

### Kommer kvittohanterare att utlösa för konton och deras underkonton?

-Om ett `account` anges kommer det bara att matcha det exakta kontonamnet. Det är möjligt att matcha underkonton genom att ange ett `accounts`-fält, med `suffixes` och `prefixes` angivna för att matcha konton och underkonton, till exempel följande skulle matcha alla `mintbase1.near` underkonton:
+If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts:

```yaml
accounts:
  suffixes:
    - mintbase1.near
```

@@ -276,8 +276,8 @@ Väntande funktionalitet stöds ännu inte för NEAR subgrafer. Under tiden kan

### Min fråga har inte besvarats, var kan jag få mer hjälp med att bygga NEAR subgrafer?

-Om det är en generell fråga om subgraffutveckling finns det mycket mer information i resten av [Utvecklardokumentationen](/subgraphs/quick-start/). Annars, var vänlig och anslut dig till [The Graph Protocol Discord](https://discord.gg/graphprotocol) och ställ din fråga i kanalen #near eller skicka ett e-postmeddelande till near@thegraph.com.
+If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/subgraphs/quick-start/). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com.

## Referenser

-- [NEAR utvecklar dokumentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton)
+- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton)
From b3104f43a823b2c24e1fd208a11a345f7d3901d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:56:54 -0500
Subject: [PATCH 0708/1534] New translations near.mdx (Turkish)

---
 .../src/pages/tr/subgraphs/cookbook/near.mdx  | 84 +++++++++----------
 1 file changed, 42 insertions(+), 42 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/cookbook/near.mdx b/website/src/pages/tr/subgraphs/cookbook/near.mdx
index 8fe610b7f462..42ecff83f4f1 100644
--- a/website/src/pages/tr/subgraphs/cookbook/near.mdx
+++ b/website/src/pages/tr/subgraphs/cookbook/near.mdx
@@ -2,44 +2,44 @@ title: NEAR Üzerinde Subgraphlar Oluşturma
 ---

-Bu rehber, [NEAR blok zincirinde](https://docs.near.org/) akıllı sözleşmeleri indeksleyen subgraphlar oluşturmaya giriş niteliğindedir.
+Bu rehber, [NEAR blokzinciri](https://docs.near.org/) üzerindeki akıllı sözleşmeleri endeksleyen subgraph'ler inşa etmeye giriş niteliğindedir.

## NEAR Nedir?

-[NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information.
+[NEAR](https://near.org/), merkezi olmayan uygulamalar geliştirmek için kullanılan bir akıllı sözleşme platformudur. Daha fazla bilgi için [resmi dokümantasyona](https://docs.near.org/concepts/basics/protocol) bakabilirsiniz.

## NEAR subgraphları nedir?

-Graph, geliştiricilere blok zinciri olaylarını işlemek ve elde edilen verileri tek tek subgraph olarak bilinen bir GraphQL API aracılığıyla kolayca erişilebilir hale getirmek için araçlar sunar.
[Graph Düğümü](https://github.com/graphprotocol/graph-node) artık NEAR olaylarını işleyebiliyor, bu da NEAR geliştiricilerinin artık akıllı sözleşmelerini indekslemek için subgraphlar oluşturabilecekleri anlamına geliyor. +The Graph, geliştiricilere blokzinciri olaylarını işleyip, sonuçtaki veriyi bir GraphQL API'ı (subgraph olarak da bilinir) aracılığıyla kolayca erişilebilir kılacak araçlar sunar. [Graph Düğümü](https://github.com/graphprotocol/graph-node) artık NEAR olaylarını işleyebiliyor, bu da NEAR geliştiricilerinin akıllı sözleşmelerini endekslemek için subgraph'ler oluşturabileceği anlamına gelir. -Subgraphlar olay tabanlıdır, yani zincir üzerindeki olayların etkinliklerini gözler ve ardından işlerler. Şu anda NEAR subgraphları için desteklenen iki tür işleyici vardır: +Subgraph'ler olay tabanlıdır., yani zincir üzerindeki olayları dinler ve sonrasında işlerler. Şu anda NEAR subgraph'leri için desteklenen iki tür işleyici bulunmaktadır: - Blok işleyicileri: Bunlar her yeni blokta çalışır - Makbuz işleyicileri: Belirli bir hesapta her mesaj yürütüldüğünde çalışır -[From the NEAR documentation](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): +[NEAR dokümantasyonundan](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): > Makbuz, sistemdeki eyleme geçirilebilir tek nesnedir. NEAR platformunda "bir işlemin işlenmesinden" bahsettiğimizde, bu nihayetinde bir noktada "makbuzların uygulanması" anlamına gelir. ## NEAR Subgraph'ı Oluşturma -`@graphprotocol/graph-cli`, subgraphları oluşturmak ve dağıtmak için kullanılan bir komut satırı aracıdır. +`@graphprotocol/graph-cli`, subgraph'ler oluşturmak ve dağıtmak için kullanılan bir komut satırı aracıdır. -`@graphprotocol/graph-ts`, bir subgraph özel türler kütüphanesidir. +`@graphprotocol/graph-ts`, subgraph'e özgü türlerden oluşan bir kütüphanedir. -NEAR subgraph'ı geliştirmek, `0.23.0` sürümünden yüksek `graph-cli` ve `0.23.0` sürümünden yüksek `graph-ts` gerektirir. +NEAR ağında subgraph geliştirmek, `graph-cli`'nin `0.23.0` üstü sürümünü ve `graph-ts`'nin `0.23.0` üstü sürümünü gerektirir. > Bir NEAR subgraph'ı oluşturmak, Ethereum'u indeksleyen bir subgraph oluşturmakla çok benzerdir. Subgraph tanımının üç yönü vardır: -**subgraph.yaml:** Veri kaynaklarını ve bunların nasıl işleneceğini tanımlayan subgraph manifestidir. NEAR, yeni bir veri kaynağı türüdür(`kind`). +**subgraph.yaml:** subgraph manifestosudur, ilgi duyulan veri kaynaklarını tanımlar ve bunların nasıl işleneceğini açıklar. NEAR, yeni bir `kind` (tür) veri kaynağıdır. -**schema.graphql:** Subgraph'ınız için hangi verilerin depolandığını ve bunlara GraphQL aracılığıyla nasıl sorgu yapılacağını tanımlayan bir şema dosyası. NEAR subgraph gereksinimleri [mevcut belgelendirmede](/developing/creating-a-subgraph/#the-graphql-schema) ele alınmıştır. +**schema.graphql:** subgraph'iniz için hangi verilerin depolanacağını ve bunların GraphQL kullanılarak nasıl sorgulanacağını tanımlayan şema dosyasıdır. NEAR subgraph'leri için gereksinimler [mevcut dokümantasyon](/developing/creating-a-subgraph/#the-graphql-schema) tarafından kapsanmaktadır. -**AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. 
+**AssemblyScript Eşlemeleri:** Olay verisini, şemanızda tanımlanan varlıklara dönüştüren [AssemblyScript kodudur](/subgraphs/developing/creating/graph-ts/api/). NEAR desteği, NEAR'a özgü veri türleri ve yeni JSON ayrıştırma işlevi sunar. -Subgraph geliştirme sırasında iki temel komut bulunmaktadır: +Subgraph geliştirme sırasında iki anahtar komut vardır: ```bash $ graph codegen # manifest'de tanımlanan şema dosyasından tipleri üretir @@ -48,7 +48,7 @@ $ graph build # AssemblyScript dosyalarından Web Assembly oluşturur ve tüm su ### Subgraph Manifest Tanımı -Subgraph manifesti (`subgraph.yaml`), subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Bir NEAR subgraph'ı özelinde örnek bir subgraph manifesti için aşağıya bakınız: +Subgraph manifestosu (`subgraph.yaml`), subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Aşağıda bir NEAR subgraph'i için örnek bir subgraph manifestosu bulunmaktadır: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # Assemblyscript eşleştirmelerinin bulunduğu dosyaya bağlantı ``` -- NEAR subgraphları yeni bir veri kaynağı türü(`kind`) olan `near`'ı sunar -- The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` -- NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- NEAR veri kaynakları, opsiyonel son ekler ve ön ekler içeren alternatif bir opsiyonel `source.accounts` alanı sunar. En azından ön ek veya son ek belirtilmelidir, bunlar sırasıyla değer listesiyle başlayan veya biten herhangi bir hesapla eşleşecektir. Aşağıdaki örnek eşleşecektir: `[app|good].*[morning.near|morning.testnet]`. Yalnızca bir ön ek veya son ek listesi gerekliyse, diğer alan atlanabilir. +- NEAR subgraph'leri yeni bir veri kaynağı `kind`'ı (türü) sunar (`near`) +- `network`, subgraph'i sunan Graph Düğümü üzerindeki bir ağa karşılık gelmelidir. Subgraph Studio'da, NEAR'ın ana ağı `near-mainnet`, ve NEAR'ın test ağı `near-testnet`'tir +- NEAR veri kaynakları, [NEAR hesabı](https://docs.near.org/concepts/protocol/account-model) ile ilişkili, insan tarafından okunabilir bir kimlik olan isteğe bağlı `source.account` alanını sunar. Bu, bir hesap veya alt hesap olabilir. +- NEAR veri kaynakları, isteğe bağlı ek `source.accounts` alanını tanıtır. Bu alan isteğe bağlı sonekler ve önekler içerir. En azından bir önek veya sonek belirtilmelidir. Bu ekler ilgili listedeki değerlerle başlayan veya biten herhangi bir hesabı eşleştirirler. Aşağıdaki örnek şunlarla eşleşecektir: `[app|good].*[morning.near|morning.testnet]`. Sadece önekler veya sonekler listesi gerekiyorsa diğer alan atlanabilir. ```yaml accounts: @@ -87,18 +87,18 @@ accounts: NEAR veri kaynakları iki tür işleyiciyi destekler: -- `blockHandlers`: Her yeni NEAR bloğunda çalıştırılır. `source.account` gerekli değildir. -- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). +- `blockHandlers`: her yeni NEAR blokunda çalıştırılır. 
`source.account` gerekli değildir. +- `receiptHandlers`: veri kaynağının `source.account`'unun alıcı olduğu her makbuzda çalışır. Makbuz (receipt) teknik bir kavramdır, daha detaylı bilgi için NEAR dokümanlarını inceleyebilirsiniz. Bu noktada, yalnızca tam eşleşmelerin işlendiğine dikkat edin. ([Alt hesaplar](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) bağımsız veri kaynakları olarak eklenmelidir). ### Şema Tanımı -Şema tanımı, ortaya çıkan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri tanımlar. Bu, orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla ayrıntı [burada](/developing/creating-a-subgraph/#the-graphql-schema) bulunmaktadır. +Şema tanımı, ortaya çıkan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri açıklar. Bu, orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla detay [burada](/developing/creating-a-subgraph/#the-graphql-schema) bulunabilir. -### AssemblyScript Eşleştirmeleri +### AssemblyScript Eşlemeleri -Olayları işlemek için işleyiciler [AssemblyScript](https://www.assemblyscript.org/) içinde yazılmıştır. +Olayları işlemek için kullanılan işleyiciler [AssemblyScript](https://www.assemblyscript.org/) ile yazılmıştır. -NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). +NEAR endeksleme, [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) için NEAR'a özgü veri türlerini tanıtır. ```typescript @@ -162,31 +162,31 @@ class ReceiptWithOutcome { Bu türler blok & makbuz işleyicilerine aktarılır: -- Blok işleyicileri bir `Block` alır -- Makbuz işleyicileri bir `ReceiptWithOutcome` alır +- Blok işleyiciler bir `Block` alacaktır +- Makbuz işleyiciler bir `ReceiptWithOutcome` alacaktır -Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. +Aksi takdirde, NEAR subgraph geliştiricileri eşleme yürütme sırasında [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/)'ının geri kalanını kullanabilir. -This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. +Bu, yeni bir JSON ayrıştırma fonksiyonunu içerir - NEAR üzerindeki günlükler sıklıkla dizeleştirilmiş JSON olarak yayılır. Geliştiricilerin bu günlükleri kolayca işlemelerine olanak tanımak için [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) kapsamında yeni bir `json.fromString(...)` fonksiyonu mevcuttur. ## NEAR Subgraph'ını Dağıtma -Bir subgraph oluşturduktan sonra, artık indeksleme için Graph Düğümü'ne dağıtma zamanı gelmiştir. NEAR subgraphları sürümü `>=v0.26.x` (bu sürüm henüz etiketlenmemiş & yayınlanmamıştır) olan herhangi bir Graph Düğümü'ne dağıtılabilir. +Bir subgraph'i oluşturduktan sonra sıradaki adım bu subgraph'i endeksleme için Graph Düğümü'ne dağıtmaktır. NEAR subgraph'leri, herhangi bir Graph Düğümü `>=v0.26.x` sürümüne dağıtılabilir (bu sürüm henüz etiketlenmemiş ve yayımlanmamıştır). 
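Before deploying, it is worth sanity-checking the mapping entry points against the manifest. As a counterpart to the receipt handlers described above, here is a minimal block-handler sketch; the `BlockMeta` entity is a hypothetical stand-in for whatever your own schema defines.

```typescript
import { near, BigInt } from '@graphprotocol/graph-ts'
// Hypothetical entity; depends on your own schema.graphql and codegen output
import { BlockMeta } from '../generated/schema'

export function handleBlock(block: near.Block): void {
  const header = block.header
  const entity = new BlockMeta(header.hash.toBase58())
  // u64 header fields map naturally onto BigInt schema fields
  entity.height = BigInt.fromU64(header.height)
  entity.timestamp = BigInt.fromU64(header.timestampNanosec)
  entity.save()
}
```

A function like this is what the manifest's `blockHandlers` entry (for example `handleNewBlock` in the YAML shown earlier) would point at.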
-Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: +The Graph Ağı'ndaki Subgraph Studio ve yükseltme Endeksleyicisi şu anda beta olarak NEAR ana ağı ve test ağını endekslemeyi, aşağıdaki ağ isimleriyle desteklemektedir: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). +Subgraph Studio'da subgraph'ler oluşturma ve dağıtma hakkında daha fazla bilgi [burada](/deploying/deploying-a-subgraph-to-studio/) bulunabilir. -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". +Kısa bir ön bilgi olarak - ilk adım subgraph'inizi "oluşturmak"tır - bu sadece bir kez yapılması gereken bir işlemdir. Subgraph Studio'da, [Gösterge Paneliniz](https://thegraph.com/studio/)'deki "Bir subgraph oluştur" kısmında yapılabilir. -Subgraph'ınız oluşturulduktan sonra, `graph deploy` CLI komutunu kullanarak subgraph'ınızı dağıtabilirsiniz: +Subgraph oluşturulduktan sonra, `graph deploy` CLI komutunu kullanarak subgraph'inizi dağıtabilirsiniz: ```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node # yerel bir Graph Düğümünde bir subgraph oluşturur (Subgraph Studio'da, bu işlem UI üzerinden yapılır) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # manifesto IPFS hash'ine göre belirtilen bir Graph Düğümü'ne subgraph'i dağıtır ve yapım dosyalarını belirtilen IPFS uç noktasına yükler ``` Düğüm yapılandırması, subgraph'ın nerede dağıtıldığına bağlı olacaktır. @@ -195,13 +195,13 @@ Düğüm yapılandırması, subgraph'ın nerede dağıtıldığına bağlı olac ```sh graph auth -graph deploy +graph deploy ``` ### Yerel Graph Düğümü (varsayılan yapılandırmaya göre) ```sh -graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 +graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` Subgraph'ınız dağıtıldıktan sonra Graph Düğüme tarafından indekslenecektir. Subgraph'ın kendisini sorgulayarak ilerlemesini kontrol edebilirsiniz: @@ -228,17 +228,17 @@ Yukarıdaki bileşenlerin çalıştırılması hakkında yakında daha fazla bil ## NEAR Subgraph'ını Sorgulama -NEAR subgraphları için GraphQL uç noktası, mevcut API arayüzü ile şema tanımı tarafından belirlenir. Daha fazla bilgi için lütfen [GraphQL API dökümantasyonunu](/subgraphs/querying/graphql-api/) ziyaret edin. +NEAR subgraph'leri için GraphQL uç noktası, mevcut API arayüzü ile şema tanımına göre belirlenir. Daha fazla bilgi için [GraphQL API dokümantasyonuna](/subgraphs/querying/graphql-api/) bakabilirsiniz. ## Örnek Subgraph'ler -Here are some example subgraphs for reference: +Aşağıda bazı örnek subgraph'leri bulabilirsiniz: [NEAR Blokları](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) [NEAR Makbuzları](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## SSS +## FAQ ### Beta nasıl çalışır? 
@@ -254,7 +254,7 @@ Hayır, bir subgraph yalnızca bir zincirden/ağdan veri kaynaklarını destekle ### Makbuz işleyicileri hesaplar ve bunların alt hesapları için tetiklenecek mi? -Bir `account` belirtilirse, bu yalnızca tam hesap adıyla eşleşecektir. Hesapları ve alt hesapları eşleştirmek için `suffixes` ve `prefixes` ile birlikte bir `accounts` alanı belirterek alt hesapları eşleştirmek mümkündür, örneğin aşağıdaki tüm `mintbase1.near` alt hesaplarıyla eşleşir: +Bir `account` belirtildiyse, yalnızca tam hesap adı eşleştirilecektir. `accounts` alanı belirterek alt hesapları eşleştirmek mümkündür. `suffixes` (önekleri) ve `prefixes`'i (sonekleri) ve `accounts` alanını da belirterek, alt hesapları eşleştirmek mümkündür. Örneğin, aşağıdaki `mintbase1.near` alt hesaplarının tümünü eşleştirecektir: ```yaml accounts: @@ -276,8 +276,8 @@ Bekleme fonksiyonelliği henüz NEAR subgraphları için desteklenmemektedir. Bu ### Sorum yanıtlanmadı, NEAR subgraphları oluşturma konusunda nereden daha fazla yardım alabilirim? -Subgraph geliştirme hakkında genel bir soruysa, [Geliştirici dökümantasyonu'nun](/subgraphs/quick-start/) geri kalanında çok daha fazla bilgi bulunmaktadır. Aksi durumda lütfen [The Graph Protocol Discord](https://discord.gg/graphprotocol) sunucusuna katılın ve #near kanalında sorunuzu sorun veya near@thegraph.com adresine e-posta gönderin. +Eğer subgraph geliştirme ile ilgili genel bir soru ise, [Geliştirici dokümantasyonunun](/subgraphs/quick-start/) geri kalanında çok daha fazla bilgi bulunmaktadır. Eğer burada aradığınızı bulamazsanız lütfen [The Graph Protocol Discord](https://discord.gg/graphprotocol) sunucusuna katılın ve #near kanalında sorunuzu sorun. Veya near@thegraph.com adresine e-posta gönderin. ## Referanslar -- [NEAR geliştirici dökümantasyonu](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [NEAR geliştirici dokümantasyonu](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From 997e70fa9e3275fbc7ce968b145d96e05a1fe0eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:55 -0500 Subject: [PATCH 0708/1534] New translations near.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/cookbook/near.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/near.mdx b/website/src/pages/uk/subgraphs/cookbook/near.mdx index 1ad5e3c45c00..a94bd9531fdb 100644 --- a/website/src/pages/uk/subgraphs/cookbook/near.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/near.mdx @@ -12,7 +12,7 @@ This guide is an introduction to building subgraphs indexing smart contracts on The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. 
There are currently two types of handlers supported for NEAR subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -96,7 +96,7 @@ Schema definition describes the structure of the resulting subgraph database and ### AssemblyScript Mappings -Обробники для виконання подій написані на мові [AssemblyScript](https://www.assemblyscript.org/). +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). From 142f4d4ac79fa20d7f7b8cfefaedc1118e3d800f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:56 -0500 Subject: [PATCH 0709/1534] New translations near.mdx (Chinese Simplified) --- .../src/pages/zh/subgraphs/cookbook/near.mdx | 66 +++++++++---------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/near.mdx b/website/src/pages/zh/subgraphs/cookbook/near.mdx index b2544c076d8a..6bac46becff8 100644 --- a/website/src/pages/zh/subgraphs/cookbook/near.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/near.mdx @@ -2,20 +2,20 @@ title: 在 NEAR 上构建子图 --- -本指南介绍了如何在[NEAR 区块链](https://docs.near.org/)上构建索引智能合约的子图。 +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). -## NEAR 是什么? +## What is NEAR? [NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. -## NEAR 子图是什么? +## What are NEAR subgraphs? -Graph 为开发人员提供了一种被称为子图的工具,利用这个工具,开发人员能够处理区块链事件,并通过 GraphQL API 提供结果数据。 [Graph 节点](https://github.com/graphprotocol/graph-node)现在能够处理 NEAR 事件,这意味着 NEAR 开发人员现在可以构建子图来索引他们的智能合约。 +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -子图是基于事件的,这意味着子图可以侦听并处理链上事件。 NEAR 子图目前支持两种类型的处理程序: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: -- 区块处理器: 这些处理程序在每个新区块上运行。 -- 收据处理器: 每次在指定账户上一个消息被执行时运行。 +- Block handlers: these are run on every new block +- Receipt handlers: run every time a message is executed at a specified account [From the NEAR documentation](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): @@ -23,19 +23,19 @@ Graph 为开发人员提供了一种被称为子图的工具,利用这个工 ## 构建 NEAR 子图 -`@graphprotocol/graph-cli`是一个用于构建和部署子图的命令行工具。 +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`@graphprotocol/graph-ts` 是子图特定类型的库。 +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -NEAR 子图开发需要`0.23.0`以上版本的`graph-cli`,以及 `0.23.0`以上版本的`graph-ts`。 +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. > 构建 NEAR 子图与构建索引以太坊的子图非常相似。 子图定义包括三个方面: -**subgraph.yaml:** 子图清单,定义感兴趣的数据源以及如何处理它们。 NEAR 是一种全新`类型`数据源。 +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. 
-**schema.graphql:** 一个模式文件,定义子图存储的数据以及如何通过 GraphQL 查询数据。NEAR 子图的要求已经在[现有的文档](/developing/creating-a-subgraph/#the-graphql-schema)中介绍了。 +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. @@ -48,7 +48,7 @@ $ graph build # 从 AssemblyScript 文件生成 Web Assembly,并在 /build 文 ### 子图清单定义 -子图清单(`subgraph.yaml`)标识子图的数据源、感兴趣的触发器以及响应这些触发器而运行的函数。 以下是一个 NEAR 的子图清单的例子: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR 子图引入了一种新的 `kind` 数据源(`near`)。 +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- NEAR 数据源引入了一个替代的可选 `source. account` 字段,其中包含可选的后缀和前缀。至少必须指定前缀或后缀,它们将分别与以值列表开始或结束的任何账户匹配。下面的例子将匹配: `[ app | good]。* [ morning.near | morning.testnet]`.如果只需要一个前缀或后缀列表,则可以省略其他字段。 +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml accounts: @@ -85,18 +85,18 @@ accounts: - morning.testnet ``` -NEAR 数据源支持两种类型的处理程序: +NEAR data sources support two types of handlers: -- `blockHandlers`:在每个新的 NEAR 区块上运行。 不需要 `source.account`。 +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). ### 模式定义 -模式定义描述了生成的子图数据库结构以及实体之间的关系。这与原始数据源是不可知的。[这里](/developing/creating-a-subgraph/#the-graphql-schema)有更多关于子图模式定义的细节。 +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### AssemblyScript 映射 -处理事件的处理程序是用 [AssemblyScript](https://www.assemblyscript.org/) 编写的。 +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). 
NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -160,10 +160,10 @@ class ReceiptWithOutcome { } ``` -这些类型被传递给区块 & 收据处理程序: +These types are passed to block & receipt handlers: -- 块处理程序将收到 `Block` -- 收据处理程序将收到 `ReceiptWithOutcome` +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. @@ -171,18 +171,18 @@ This includes a new JSON parsing function - logs on NEAR are frequently emitted ## 部署 NEAR 子图 -构建子图后,就可以将其部署到 Graph节点以进行索引了。 NEAR 子图可以部署到任何图节点 `>=v0.26.x`(此版本尚未标记和发布)。 +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: -- `near-主网` -- `near-测试网` +- `near-mainnet` +- `near-testnet` More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -创建子图后,您可以使用 `graph deploy` CLI 命令部署子图: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,17 +228,17 @@ graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 Date: Fri, 14 Feb 2025 12:56:57 -0500 Subject: [PATCH 0710/1534] New translations near.mdx (Urdu (Pakistan)) --- .../src/pages/ur/subgraphs/cookbook/near.mdx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/near.mdx b/website/src/pages/ur/subgraphs/cookbook/near.mdx index f719a19bf33e..3ff637ebf556 100644 --- a/website/src/pages/ur/subgraphs/cookbook/near.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/near.mdx @@ -2,7 +2,7 @@ title: سب گرافس کو NEAR پر بنانا --- -یہ گائیڈ [NEAR بلاکچین](https://docs.near.org/) پر سمارٹ کنٹریکٹ کو ترتیب دینے والے سب گراف کی تعمیر کا ایک تعارف ہے. +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## NEAR کیا ہے؟ @@ -10,9 +10,9 @@ title: سب گرافس کو NEAR پر بنانا ## NEAR سب گراف کیا ہیں؟ -گراف ڈویلپرز کو بلاکچین ایونٹس پر کارروائی کرنے کے لیے ٹولز دیتا ہے اور نتیجے میں ڈیٹا کو GraphQL API کے ذریعے آسانی سے دستیاب کرتا ہے، جسے انفرادی طور پر سب گراف کے نام سے جانا جاتا ہے۔ [گراف نوڈ](https://github.com/graphprotocol/graph-node) اب NEAR ایونٹس پر کارروائی کرنے کے قابل ہے، جس کا مطلب ہے کہ NEAR ڈویلپرز اب اپنے سمارٹ کنٹریکٹ کو انڈیکس کرنے کے لیے سب گراف بنا سکتے ہیں. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. 
-سب گراف ایونٹس پر مبنی ہیں, جس کا مطلب ہے کہ وہ آن چین ایونٹس کو سنتے ہیں اور پھر اس پر کارروائی کرتے ہیں. NEAR سب گرافس کے لیے فی الحال دو قسم کے ہینڈلرز کی حمایت کی جاتی ہے:
+Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs:

- بلاک ہینڈلرز: یہ ہر نۓ بلاک پر چلتے ہیں
- ریسیپٹ ہینڈلرز: ہر بار جب کسی مخصوص اکاؤنٹ پر کوئی پیغام عمل میں آۓ تو چلتا ہے
@@ -23,32 +23,32 @@ title: سب گرافس کو NEAR پر بنانا

## NEAR سب گراف بنانا

-`@graphprotocol/graph-cli` ایک کمانڈ لائن ٹول ہے جو سب گرافس بناتا اور تعینات کرتا ہے.
+`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs.

-`@graphprotocol/graph-ts` سب گراف کی مخصوص اقسام کی لائبریری ہے.
+`@graphprotocol/graph-ts` is a library of subgraph-specific types.

-NEAR سب گراف ڈیولپمنٹ کے لیے `graph-cli` اوپر والے ورژن `0.23.0`، اور `graph-ts` اوپر والے ورژن `0.23.0` کی ضرورت ہوتی ہے.
+NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`.

> NEAR سب گراف کی تعمیر ایک سب گراف بنانے کے مترادف ہے جو ایتھریم کو انڈیکس کرتا ہے.

سب گراف کی تعریف کے تین پہلو ہیں:

-**سب گراف.yaml:** سب گراف مینی فیسٹ، دلچسپی کے ڈیٹا کے ذرائع کی وضاحت کرتا ہے، اور ان پر کارروائی کیسے کی جانی چاہیے۔ NEAR ڈیٹا سورس کا ایک نیا `قسم` ہے.
+**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source.

-** schema.graphql:** ایک اسکیما فائل جو اس بات کی وضاحت کرتی ہے کہ آپ کے سب گراف کے لیے کون سا ڈیٹا محفوظ کیا جاتا ہے، اور GraphQL کے ذریعے اس سے کیوری کیسے کیا جائے۔ NEAR سب گراف کے تقاضوں کا احاطہ [موجودہ دستاویزات](/developing/creating-a-subgraph/#the-graphql-schema) سے ہوتا ہے.
+**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema).

**AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality.

سب گراف کی ترقی کے دوران دو اہم کمانڈز ہیں:

```bash
-$ graph codegen # generates types from the schema file identified in the manifest
-$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder
+$ graph codegen # ظاہر میں شناخت کردہ اسکیما فائل سے اقسام تیار کرتا ہے۔
+$ graph build # اسمبلی سکرپٹ فائلوں سے ویب اسمبلی تیار کرتا ہے، اور تمام ذیلی گراف فائلوں کو /build فولڈر میں تیار کرتا ہے۔
```

### سب گراف مینی فیسٹ کی تعریف

-سب گراف مینی فیسٹ (`subgraph.yaml`) سب گراف کے لیے ڈیٹا کے ذرائع، دلچسپی کے محرکات، اور ان افعال کی نشاندہی کرتا ہے جو ان محرکات کے جواب میں چلائے جانے چاہئیں۔ NEAR سب گراف کے لیے ذیل میں سب گراف مینی فیسٹ کی مثال دیکھیں:
+The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers.
See below for an example subgraph manifest for a NEAR subgraph: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR سب گراف ڈیٹا ماخذ کا ایک نیا `kind` متعارف کراتے ہیں (`near`) +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- NEAR ڈیٹا کے ذرائع ایک متبادل اختیاری `source.accounts` فیلڈ متعارف کراتے ہیں، جس میں اختیاری لاحقے اور سابقے ہوتے ہیں۔ کم از کم سابقہ ​​یا لاحقہ متعین ہونا ضروری ہے، وہ بالترتیب اقدار کی فہرست کے ساتھ شروع یا ختم ہونے والے کسی بھی اکاؤنٹ سے مماثل ہوں گے۔ نیچے دی گئی مثال مماثل ہوگی: `[app|good].*[morning.near|morning.testnet]`۔ اگر صرف سابقوں یا لاحقوں کی فہرست ضروری ہو تو دوسری فیلڈ کو چھوڑا جا سکتا ہے. +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml accounts: @@ -87,16 +87,16 @@ accounts: قریبی ڈیٹا ذرائع دو قسم کے ہینڈلرز کی حمایت کرتے ہیں: -- `blockHandlers`: ہر نئے NEAR بلاک پر چلائیں۔ کسی `source.account` کی ضرورت نہیں ہے. +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). ### اسکیما کی تعریف -سکیما کی تعریف نتیجے میں سب گراف ڈیٹا بیس کی ساخت اور ہستیوں کے درمیان تعلقات کو بیان کرتی ہے۔ یہ اصل ڈیٹا ماخذ کے بارے میں علمی ہے۔ سب گراف اسکیما کی تعریف کے بارے میں مزید تفصیلات [یہاں](/developing/creating-a-subgraph/#the-graphql-schema) ہیں. +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ### اسمبلی اسکرپٹ سب میپنک -پروسیسنگ ایونٹس کے ہینڈلرز [اسمبلی اسکرپٹ](https://www.assemblyscript.org/) میں لکھے گئے ہیں. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -160,10 +160,10 @@ class ReceiptWithOutcome { } ``` -ان اقسام کو بلاک اور ریسیپٹ ہینڈلرز میں منتقل کیا جاتا ہے: +These types are passed to block & receipt handlers: -- بلاک ہینڈلرز کو ایک `Block` ملے گا -- ریسیپٹ ہینڈلرز کو ایک `ReceiptWithOutcome` ملے گا +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. 
@@ -171,7 +171,7 @@ This includes a new JSON parsing function - logs on NEAR are frequently emitted ## NEAR سب گراف کی تعیناتی -ایک بار جب آپ کے پاس بلٹ سب گراف ہو جاتا ہے، تو یہ وقت ہے کہ اسے انڈیکسنگ کے لیے گراف نوڈ میں تعینات کریں۔ NEAR سب گراف کو کسی بھی گراف نوڈ `>=v0.26.x` پر تعینات کیا جا سکتا ہے (اس ورژن کو ابھی تک ٹیگ اور ریلیز نہیں کیا گیا ہے). +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: @@ -182,7 +182,7 @@ More information on creating and deploying subgraphs on Subgraph Studio can be f As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -ایک دفا آپ کا سب گراف بن گیا ہے، آپ اپنا سب گراف `graph deploy` کی CLI کمانڈ کا استعمال کرتے ہوۓ تعینات کر سکتے ہیں: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,17 +228,17 @@ graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 Date: Fri, 14 Feb 2025 12:56:58 -0500 Subject: [PATCH 0711/1534] New translations near.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/cookbook/near.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/near.mdx b/website/src/pages/vi/subgraphs/cookbook/near.mdx index 324ff08549e1..6060eb27e761 100644 --- a/website/src/pages/vi/subgraphs/cookbook/near.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/near.mdx @@ -12,7 +12,7 @@ This guide is an introduction to building subgraphs indexing smart contracts on The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - Block handlers: these are run on every new block - Receipt handlers: run every time a message is executed at a specified account @@ -238,7 +238,7 @@ Here are some example subgraphs for reference: [NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## CÂU HỎI THƯỜNG GẶP +## FAQ ### How does the beta work? 
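The `json.fromString(...)` function mentioned above can be made concrete with a short AssemblyScript sketch: a receipt handler that walks the receipt's execution-outcome logs and parses each one. The handler name and the `amount` field it looks for are hypothetical, not taken from these files.

```typescript
import { near, json, JSONValueKind, log } from '@graphprotocol/graph-ts'

// Runs for each receipt whose recipient matches `source.account` in subgraph.yaml.
export function handleReceipt(receipt: near.ReceiptWithOutcome): void {
  const logs = receipt.outcome.logs // NEAR contracts frequently emit stringified JSON here
  for (let i = 0; i < logs.length; i++) {
    // json.try_fromString is the safer variant if a log may not be valid JSON.
    const parsed = json.fromString(logs[i])
    if (parsed.kind != JSONValueKind.OBJECT) continue
    const amount = parsed.toObject().get('amount') // hypothetical field name
    if (amount != null && amount.kind == JSONValueKind.STRING) {
      log.info('Receipt logged amount: {}', [amount.toString()])
    }
  }
}
```

From here, the parsed values would typically be written to entities generated from the schema rather than merely logged.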
From 08d7100b4e56525782986a67717666b1320fd907 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:56:59 -0500 Subject: [PATCH 0712/1534] New translations near.mdx (Marathi) --- .../src/pages/mr/subgraphs/cookbook/near.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/near.mdx b/website/src/pages/mr/subgraphs/cookbook/near.mdx index 89b6b4eba868..6e790fdcb0cf 100644 --- a/website/src/pages/mr/subgraphs/cookbook/near.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/near.mdx @@ -10,9 +10,9 @@ This guide is an introduction to building subgraphs indexing smart contracts on ## NEAR subgraphs म्हणजे काय? -ग्राफ डेव्हलपरला ब्लॉकचेन इव्हेंट्सवर प्रक्रिया करण्यासाठी आणि परिणामी डेटा ग्राफक्यूएल API द्वारे सहज उपलब्ध करण्यासाठी साधने देतो, जो वैयक्तिकरित्या सबग्राफ म्हणून ओळखला जातो. [ग्राफ नोड](https://github.com/graphprotocol/graph-node) आता जवळच्या इव्हेंटवर प्रक्रिया करण्यास सक्षम आहे, याचा अर्थ असा की NEAR डेव्हलपर आता त्यांचे स्मार्ट करार अनुक्रमित करण्यासाठी सबग्राफ तयार करू शकतात. +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -सबग्राफ इव्हेंट-आधारित असतात, याचा अर्थ ते ऐकतात आणि नंतर ऑन-चेन इव्हेंटवर प्रक्रिया करतात. सध्या जवळच्या सबग्राफसाठी दोन प्रकारचे हँडलर समर्थित आहेत: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - ब्लॉक हँडलर: हे प्रत्येक नवीन ब्लॉकवर चालवले जातात - पावती हँडलर्स: निर्दिष्ट खात्यावर संदेश कार्यान्वित झाल्यावर प्रत्येक वेळी चालवा @@ -23,19 +23,19 @@ This guide is an introduction to building subgraphs indexing smart contracts on ## एक NEAR सबग्राफतयार करणे -`@graphprotocol/graph-cli` हे सबग्राफ तयार करण्यासाठी आणि तैनात करण्यासाठी कमांड-लाइन साधन आहे. +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`@graphprotocol/graph-ts` ही सबग्राफ-विशिष्ट प्रकारांची लायब्ररी आहे. +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -निअर सबग्राफ डेव्हलपमेंटसाठी `ग्राफ-क्ली` वरील आवृत्ती `0.23.0` आणि `0.23.0` वरील आवृत्ती `graph-ts` आवश्यक आहे. +NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. > NEAR सबग्राफ तयार करणे, याची प्रक्रिया इथेरियमवरील सबग्राफ तयार करण्याशी खूप सामान्यतेने सादर करते. सबग्राफ व्याख्येचे तीन पैलू आहेत: -**subgraph.yaml:** सबग्राफ मॅनिफेस्ट, स्वारस्य असलेल्या डेटा स्रोतांची व्याख्या आणि त्यावर प्रक्रिया कशी करावी. NEAR डेटा स्रोताचा एक नवीन `प्रकार` आहे. +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** एक स्कीमा फाइल जी तुमच्या सबग्राफसाठी कोणता डेटा संग्रहित केला जातो आणि GraphQL द्वारे त्याची क्वेरी कशी करावी हे परिभाषित करते. जवळच्या सबग्राफसाठी आवश्यकता [विद्यमान दस्तऐवज](/developing/creating-a-subgraph/#the-graphql-schema) द्वारे कव्हर केल्या जातात. +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. 
The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). **AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. @@ -48,7 +48,7 @@ $ graph build # असेंबलीस्क्रिप्ट फायली ### सबग्राफ मॅनिफेस्ट व्याख्या -सबग्राफ मॅनिफेस्ट (`subgraph.yaml`) सबग्राफसाठी डेटा स्रोत, स्वारस्य ट्रिगर आणि त्या ट्रिगरला प्रतिसाद म्हणून चालवल्या जाणार्‍या कार्ये ओळखतो. जवळच्या सबग्राफसाठी उदाहरण सबग्राफ मॅनिफेस्टसाठी खाली पहा: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs डेटा स्रोताचा एक नवीन `प्रकार` सादर करतात (`जवळ`) +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- NEAR डेटा स्रोत पर्यायी पर्यायी `source.accounts` फील्ड सादर करतात, ज्यामध्ये पर्यायी प्रत्यय आणि उपसर्ग असतात. किमान उपसर्ग किंवा प्रत्यय निर्दिष्ट करणे आवश्यक आहे, ते अनुक्रमे मूल्यांच्या सूचीसह सुरू होणाऱ्या किंवा समाप्त होणाऱ्या कोणत्याही खात्याशी जुळतील. खालील उदाहरण जुळेल: `[app|good].*[morning.near|morning.testnet]`. जर फक्त उपसर्ग किंवा प्रत्ययांची यादी आवश्यक असेल तर इतर फील्ड वगळले जाऊ शकते. +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml accounts: @@ -87,16 +87,16 @@ accounts: जवळील डेटा स्रोत दोन प्रकारच्या हँडलरला समर्थन देतात: -- `blockHandlers`: प्रत्येक नवीन NEAR ब्लॉकवर चालवा. कोणतेही `source.account` आवश्यक नाही. +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). ### स्कीमा व्याख्या -स्कीमा व्याख्या परिणामी सबग्राफ डेटाबेसची रचना आणि संस्थांमधील संबंधांचे वर्णन करते. हे मूळ डेटा स्रोताचे अज्ञेय आहे. सबग्राफ स्कीमा व्याख्या [येथे](/developing/creating-a-subgraph/#the-graphql-schema) अधिक तपशील आहेत. +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). 
### असेंबलीस्क्रिप्ट मॅपिंग -इव्हेंटवर प्रक्रिया करण्यासाठी हँडलर [AssemblyScript](https://www.assemblyscript.org/) मध्ये लिहिलेले आहेत. +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). @@ -160,10 +160,10 @@ class ReceiptWithOutcome { } ``` -हे प्रकार ब्लॉक करण्यासाठी पास केले जातात & पावती हाताळणारे: +These types are passed to block & receipt handlers: -- ब्लॉक हँडलर्सना एक `ब्लॉक` मिळेल -- पावती हाताळणाऱ्यांना `ReceiptWithOutcome` मिळेल +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. @@ -171,18 +171,18 @@ This includes a new JSON parsing function - logs on NEAR are frequently emitted ## NEAR सबग्राफ डिप्लॉय करण्यासाठी -एकदा तुमच्याकडे अंगभूत सबग्राफ आला की, तो अनुक्रमणिकासाठी ग्राफ नोडवर तैनात करण्याची वेळ आली आहे. जवळचे सबग्राफ कोणत्याही ग्राफ नोडवर तैनात केले जाऊ शकतात `>=v0.26.x` (&अद्याप टॅग केलेली नाही आणि रिलीज केलेली नाही). +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: -- `जवळ-मेननेट` -- `जवळ-टेस्टनेट` +- `near-mainnet` +- `near-testnet` More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio/). As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -एकदा तुमचा सबग्राफ तयार झाला की, तुम्ही `graph deploy` CLI कमांड वापरून तुमचा सबग्राफ उपयोजित करू शकता: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,15 +228,15 @@ NEAR ची अनुक्रमणिका देणारा आलेख ## NEAR सबग्राफची क्वेरी करणे -NEAR subgraphs साठी GraphQL एंडपॉइंट विद्यमान API इंटरफेससह स्कीमा व्याख्येद्वारे निर्धारित केला जातो. अधिक माहितीसाठी कृपया [GraphQL API दस्तऐवज](/subgraphs/querying/graphql-api/) ला भेट द्या. +The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. ## उदाहरणे सबग्राफ Here are some example subgraphs for reference: -[NEAR ब्लॉक](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[NEAR पावत्या](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) ## FAQ @@ -254,7 +254,7 @@ NEAR सपोर्ट बीटामध्ये आहे, याचा अ ### पावती हँडलर खाती आणि त्यांच्या उप-खात्यांसाठी ट्रिगर करतील का? -एखादे `खाते` निर्दिष्ट केले असल्यास, ते फक्त खाते नावाशी जुळेल. 
`खाती` फील्ड निर्दिष्ट करून उप-खाती जुळवणे शक्य आहे, उदाहरणार्थ, खाती आणि उप-खाती जुळण्यासाठी निर्दिष्ट केलेल्या `प्रत्यय` आणि `उपसर्ग` सह. खालील सर्व `mintbase1.near` उप-खात्यांशी जुळतील: +If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: ```yaml accounts: @@ -280,4 +280,4 @@ If it is a general question about subgraph development, there is a lot more info ## संदर्भ -- [NEAR विकसक दस्तऐवजीकरण](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From 6a7e4be85f9906808a41ed3166ca27d94f72be79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:00 -0500 Subject: [PATCH 0713/1534] New translations near.mdx (Hindi) --- .../src/pages/hi/subgraphs/cookbook/near.mdx | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/near.mdx b/website/src/pages/hi/subgraphs/cookbook/near.mdx index 3b098b20ceab..6aab3eeedbb4 100644 --- a/website/src/pages/hi/subgraphs/cookbook/near.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/near.mdx @@ -2,7 +2,7 @@ title: NEAR पर सबग्राफ बनाना --- -यह गाइड [NEAR ब्लॉकचेन](https://docs.near.org/) पर स्मार्ट कॉन्ट्रैक्ट्स को इंडेक्स करने वाले सबग्राफ बनाने का परिचय है। +This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). ## NEAR क्या है? @@ -10,9 +10,9 @@ title: NEAR पर सबग्राफ बनाना ## NEAR सबग्राफ क्या हैं? -ग्राफ़ ब्लॉकचैन घटनाओं को प्रोसेस करने के लिए डेवलपर्स टूल देता है और परिणामी डेटा को एक ग्राफक्यूएल एपीआई के माध्यम से आसानी से उपलब्ध कराता है, जिसे व्यक्तिगत रूप से सबग्राफ के रूप में जाना जाता है। [ग्राफ़ नोड](https://github.com/graphprotocol/graph-node) अब NEAR इवेंट को प्रोसेस करने में सक्षम है, जिसका मतलब है कि NEAR डेवलपर अब अपने स्मार्ट कॉन्ट्रैक्ट को इंडेक्स करने के लिए सबग्राफ बना सकते हैं। +The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. -सबग्राफ घटना-आधारित होते हैं, जिसका अर्थ है कि वे ऑन-चेन घटनाओं को सुनते हैं और फिर उन्हें प्रोसेस करते हैं। वर्तमान में NEAR सबग्राफ के लिए समर्थित दो प्रकार के हैंडलर हैं: +Subgraphs are event-based, which means that they listen for and then process onchain events. There are currently two types of handlers supported for NEAR subgraphs: - ब्लॉक हैंडलर्स: ये हर नए ब्लॉक पर चलते हैं - रसीद हैंडलर: किसी निर्दिष्ट खाते पर संदेश निष्पादित होने पर हर बार चलें @@ -23,32 +23,32 @@ title: NEAR पर सबग्राफ बनाना ## NEAR सबग्राफ बनाना -`@graphprotocol/graph-cli` सबग्राफ बनाने और तैनात करने के लिए एक कमांड-लाइन टूल है। +`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. -`@graphprotocol/graph-ts` सबग्राफ-विशिष्ट प्रकार की एक लाइब्रेरी है। +`@graphprotocol/graph-ts` is a library of subgraph-specific types. -NEAR सबग्राफ डेवलपमेंट के लिए `graph-cli` का `0.23.0` के उपरोक्त संस्करण, और `graph-ts` का `0.23.0` के उपरोक्त संस्करण की आवश्यकता होती है. 
+NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. > NEAR सबग्राफ का निर्माण वह सबग्राफ के निर्माण के समान है जो एथेरियम को अनुक्रमित करता है। सबग्राफ परिभाषा के तीन पहलू हैं: -**subgraph.yaml:** सबग्राफ मेनिफेस्ट, रुचि के डेटा स्रोतों को परिभाषित करता है, और उन्हें कैसे संसाधित किया जाना चाहिए। NEAR डेटा स्रोत का `प्रकार` है। +**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. -**schema.graphql:** एक स्कीमा फ़ाइल जो परिभाषित करती है कि आपके सबग्राफ के लिए कौन सा डेटा इकट्ठा होगा, और इसे ग्राफ़क्यूएल के माध्यम से कैसे क्वेरी करें। NEAR सबग्राफ की आवश्यकताएं [मौजूदा दस्तावेज़ीकरण](/developing/creating-a-subgraph/#the-graphql-schema) द्वारा कवर की गई हैं। +**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). -**AssemblyScript मापिंग:** [AssemblyScript कोड](/subgraphs/developing/creating/graph-ts/api/) जो घटना डेटा को आपकी स्कीमा में परिभाषित संस्थाओं में अनुवादित करता है। NEAR समर्थन NEAR-विशिष्ट डेटा प्रकार और नई JSON पार्सिंग कार्यक्षमता पेश करता है। +**AssemblyScript Mappings:** [AssemblyScript code](/subgraphs/developing/creating/graph-ts/api/) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -सबग्राफ डेवलपमेंट के दौरान दो प्रमुख कमांड होते हैं: +सब ग्राफ को बनाते वक़्त दो मुख्य कमांड हैं: ```bash $ graph codegen # generates types from the schema file identified in the manifest $ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder ``` -### सबग्राफ मेनिफेस्ट परिभाषा +### सब ग्राफ मैनिफेस्ट की परिभाषा -सबग्राफ मेनिफ़ेस्ट (`subgraph.yaml`) सबग्राफ़ के लिए डेटा स्रोत, रुचि के ट्रिगर और उन ट्रिगर के जवाब में चलाए जाने वाले फ़ंक्शन की पहचान करता है. NEAR सबग्राफ के लिए एक उदाहरण सबग्राफ मेनिफेस्ट के लिए नीचे देखें: +The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: ```yaml specVersion: 0.0.2 @@ -70,10 +70,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR सबग्राफ डेटा स्रोत के एक नए `प्रकार` के साथ परिचित करता हैं (`near`) +- NEAR subgraphs introduce a new `kind` of data source (`near`) - The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` - NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. 
-- NEAR डेटा स्रोत एक वैकल्पिक वैकल्पिक `source.accounts` फ़ील्ड प्रस्तुत करते हैं, जिसमें वैकल्पिक प्रत्यय और उपसर्ग होते हैं। कम से कम उपसर्ग या प्रत्यय निर्दिष्ट किया जाना चाहिए, वे क्रमशः मूल्यों की सूची के साथ शुरू या समाप्त होने वाले किसी भी खाते से मेल खाएंगे। नीचे दिया गया उदाहरण इससे मेल खाएगा: `[app|good].*[morning.near|morning.testnet]`। यदि केवल उपसर्गों या प्रत्ययों की सूची आवश्यक है तो अन्य फ़ील्ड को छोड़ा जा सकता है। +- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. ```yaml accounts: @@ -87,18 +87,18 @@ accounts: NEAR डेटा स्रोत दो प्रकार के हैंडलर का समर्थन करते हैं: -- `blockHandlers`: हर नए NEAR ब्लॉक पर चलता है। कोई `source.account` आवश्यक नहीं है। +- `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). -### स्कीमा परिभाषा +### स्कीमा की परिभाषा -स्कीमा परिभाषा परिणामी सबग्राफ डेटाबेस की संरचना और संस्थाओं के बीच संबंधों का वर्णन करती है। यह मूल डेटा स्रोत का अज्ञेयवादी है। सबग्राफ स्कीमा परिभाषा [यहां](/developing/creating-a-subgraph/#the-graphql-schema) पर अधिक विवरण हैं। +Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). -### असेंबलीस्क्रिप्ट मैपिंग्स +### असेंबली स्क्रिप्ट मैप्पिंग्स -इवेंट को प्रोसेस करने के लिए हैंडलर [AssemblyScript](https://www.assemblyscript.org/) में लिखे होते हैं। +The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). -NEAR indexing 'NEAR' के लिए विशिष्ट डेटा प्रकारों को [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) में पेश करता है। +NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/). ```typescript @@ -160,18 +160,18 @@ class ReceiptWithOutcome { } ``` -इन प्रकारों को ब्लॉक के लिए पास किया जाता है& रसीद संचालक: +These types are passed to block & receipt handlers: -- ब्लॉक हैंडलर्स को एक `ब्लॉक` प्राप्त होगा -- रसीद संचालकों को `ReceiptWithOutcome` प्राप्त होगा +- Block handlers will receive a `Block` +- Receipt handlers will receive a `ReceiptWithOutcome` -अन्यथा, [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) का शेष भाग NEAR subgraph डेवलपर्स के लिए मैपिंग निष्पादन के दौरान उपलब्ध है। +Otherwise, the rest of the [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/) is available to NEAR subgraph developers during mapping execution. 
-यह एक नई JSON पार्सिंग फ़ंक्शन को शामिल करता है - NEAR पर लॉग अक्सर स्ट्रिंगिफाइड JSONs के रूप में उत्पन्न होते हैं। एक नई `json.fromString(...)` फ़ंक्शन [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) का हिस्सा के रूप में उपलब्ध है, जिससे डेवलपर्स इन लॉग्स को आसानी से प्रोसेस कर सकते हैं। +This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/subgraphs/developing/creating/graph-ts/api/#json-api) to allow developers to easily process these logs. ## एक NEAR सबग्राफ की तैनाती -एक बार आपके पास एक निर्मित सबग्राफ हो जाने के बाद, इसे अनुक्रमण के लिए ग्राफ़ नोड पर तैनात करने का समय आ गया है। NEAR सबग्राफ को किसी भी ग्राफ़ नोड `>=v0.26.x` पर तैनात किया जा सकता है (यह संस्करण अभी तक टैग& और जारी नहीं किया गया है)। +Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: @@ -182,7 +182,7 @@ More information on creating and deploying subgraphs on Subgraph Studio can be f As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". -एक बार आपका सबग्राफ बन जाने के बाद, आप `graph deploy` CLI कमांड का उपयोग करके अपना सबग्राफ डिप्लॉय कर सकते हैं: +Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: ```sh $ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) @@ -228,17 +228,17 @@ NEAR को अनुक्रमित करने वाले ग्रा ## NEAR सबग्राफ को क्वेरी करना -NEAR सबग्राफ के लिए ग्राफक्यूएल एंडपॉइंट मौजूदा एपीआई इंटरफेस के साथ स्कीमा परिभाषा द्वारा निर्धारित किया जाता है। अधिक जानकारी के लिए कृपया [GraphQL API दस्तावेज़ीकरण](/subgraphs/querying/graphql-api/) पर जाएं। +The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/subgraphs/querying/graphql-api/) for more information. -## उदाहरण सबग्राफ +## सब-ग्राफ के उदाहरण -संदर्भ के लिए यहां कुछ उदाहरण सबग्राफ दिए गए हैं: +Here are some example subgraphs for reference: -[NEAR ब्लॉक्स](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[NEAR रेसिप्टस](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## सामान्य प्रश्न +## FAQ ### बीटा कैसे काम करता है? @@ -246,7 +246,7 @@ NEAR समर्थन बीटा में है, जिसका मतल ### Can a subgraph index both NEAR and EVM chains? -नहीं, एक सबग्राफ केवल एक श्रृंखला/नेटवर्क से डेटा स्रोतों का समर्थन कर सकता है। +नहीं, एक सब-ग्राफ केवल एक चेन/नेटवर्क से डाटा सोर्स को सपोर्ट कर सकता है ### क्या सबग्राफ अधिक विशिष्ट ट्रिगर्स पर प्रतिक्रिया कर सकते हैं? @@ -254,7 +254,7 @@ NEAR समर्थन बीटा में है, जिसका मतल ### क्या रसीद हैंडलर खातों और उनके उप-खातों के लिए ट्रिगर करेंगे? 
-यदि कोई `खाता` निर्दिष्ट किया गया है, तो वह केवल सटीक खाता नाम से मेल खाएगा। उदाहरण के लिए, खातों और उप-खातों के मिलान के लिए निर्दिष्ट `प्रत्यय` और `उपसर्ग` के साथ, `खाते` फ़ील्ड निर्दिष्ट करके उप-खातों का मिलान करना संभव है निम्नलिखित सभी `mintbase1.near` उप-खातों से मेल खाएगा: +If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: ```yaml accounts: @@ -280,4 +280,4 @@ If it is a general question about subgraph development, there is a lot more info ## संदर्भ -- [NEAR डेवलपर दस्तावेज़ीकरण](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) +- [NEAR developer documentation](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) From ac128ea7bf754c54de4ada6d97b7e89fecb46c86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:01 -0500 Subject: [PATCH 0714/1534] New translations pruning.mdx (Romanian) --- website/src/pages/ro/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ro/subgraphs/cookbook/pruning.mdx b/website/src/pages/ro/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/ro/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From 75f5bf987527e2b2288a8bd2f1ffb2b0055f839b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:02 -0500 Subject: [PATCH 0715/1534] New translations pruning.mdx (French) --- .../pages/fr/subgraphs/cookbook/pruning.mdx | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/pruning.mdx b/website/src/pages/fr/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..88a89c5e1c97 100644 --- a/website/src/pages/fr/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/pruning.mdx @@ -1,22 +1,23 @@ --- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +title: Meilleure Pratique Subgraph 1 - Améliorer la Vitesse des Requêtes avec le Pruning de Subgraph +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. +[Le pruning](/developing/creating-a-subgraph/#prune) (élagage) retire les entités archivées de la base de données des subgraphs jusqu'à un bloc donné, et retirer les entités inutilisées de la base de données d'un subgraph améliorera souvent de manière spectaculaire les performances de requête d'un subgraph. L'utilisation de `indexerHints` est un moyen simple de réaliser le pruning d'un subgraph. -## How to Prune a Subgraph With `indexerHints` +## Comment effectuer le Pruning d'un subgraph avec `indexerHints` -Add a section called `indexerHints` in the manifest. +Ajoutez une section appelée `indexerHints` dans le manifest. 
-`indexerHints` has three `prune` options:
+`indexerHints` dispose de trois options de `prune` :

-- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0.
-- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain.
+- `prune: auto`: Conserve l'historique minimum nécessaire tel que défini par l'Indexeur, optimisant ainsi les performances des requêtes. C'est le paramètre généralement recommandé et celui par défaut pour tous les subgraphs créés par `graph-cli` >= 0.66.0.
+- `prune: <Number of Blocks to Retain>`: Définit une limite personnalisée sur le nombre de blocs historiques à conserver.
- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired.

-We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`:
+Nous pouvons ajouter `indexerHints` à nos subgraphs en mettant à jour notre `subgraph.yaml`:

```yaml
specVersion: 1.0.0
indexerHints:
  prune: auto
dataSources:
  - kind: ethereum/contract
    name: Factory
    network: mainnet
```

-## Important Considerations
+## Points Importants

- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data.

- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months).

## Conclusion

-Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements.
+L'élagage en utilisant `indexerHints` est une meilleure pratique pour le développement de subgraphs, offrant des améliorations significatives des performances des requêtes.
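The Time Travel Queries referenced in the considerations above look like the following from a client's perspective. This is a hedged TypeScript sketch: the endpoint URL and the `tokens` entity are placeholders, and the `block: { number: ... }` argument only resolves if history at that height has not been pruned away.

```typescript
// Placeholder endpoint; substitute a real query URL from Subgraph Studio.
const endpoint = 'https://api.studio.thegraph.com/query/<id>/<subgraph>/<version>'

// Pinning the query to a historical block requires that the block has not been pruned.
const query = `{ tokens(block: { number: 12345678 }) { id } }`

async function main(): Promise<void> {
  const res = await fetch(endpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query }),
  })
  console.log(JSON.stringify(await res.json(), null, 2))
}

main().catch(console.error)
```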
## Subgraph Best Practices 1-6 From a39575d4e7a9df618670f767cb4ccbd975cf1b87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:03 -0500 Subject: [PATCH 0716/1534] New translations pruning.mdx (Spanish) --- website/src/pages/es/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/es/subgraphs/cookbook/pruning.mdx b/website/src/pages/es/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/es/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/es/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From d43b675f39f31b657f014596c1ec5905772f02c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:04 -0500 Subject: [PATCH 0717/1534] New translations pruning.mdx (Arabic) --- website/src/pages/ar/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ar/subgraphs/cookbook/pruning.mdx b/website/src/pages/ar/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/ar/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From 0848e0d9c15afe9eb7fd6941b9f65355ce6ed9fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:05 -0500 Subject: [PATCH 0718/1534] New translations pruning.mdx (Czech) --- website/src/pages/cs/subgraphs/cookbook/pruning.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/pruning.mdx b/website/src/pages/cs/subgraphs/cookbook/pruning.mdx index c818c06f37d5..9bae5117f904 100644 --- a/website/src/pages/cs/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Doporučený postup 1 - Zlepšení rychlosti dotazu pomocí ořezávání podgrafů +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR @@ -14,7 +15,7 @@ Přidejte do manifestu sekci `indexerHints`. - `prune: auto`: Udržuje minimální potřebnou historii nastavenou indexátorem, čímž optimalizuje výkon dotazu. Toto je obecně doporučené nastavení a je výchozí pro všechny podgrafy vytvořené pomocí `graph-cli` >= 0.66.0. - `prune: `: Nastaví vlastní omezení počtu historických bloků, které se mají zachovat. -- `prune: never`: Je výchozí, pokud není k dispozici sekce `indexerHints`. `prune: never` by mělo být vybráno, pokud jsou požadovány [Dotazy na cestování časem](/subgraphs/querying/graphql-api/#time-travel-queries). +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. Aktualizací souboru `subgraph.yaml` můžeme do podgrafů přidat `indexerHints`: @@ -32,9 +33,9 @@ dataSources: ## Důležité úvahy -- Pokud jsou kromě ořezávání požadovány i [dotazy na cestování v čase](/subgraphs/querying/graphql-api/#time-travel-queries), musí být ořezávání provedeno přesně, aby byla zachována funkčnost dotazů na cestování v čase. 
Z tohoto důvodu se obecně nedoporučuje používat `indexerHints: prune: auto` s Time Travel Queries. Místo toho proveďte ořezávání pomocí `indexerHints: prune: ` pro přesné ořezání na výšku bloku, která zachovává historická data požadovaná dotazy Time Travel, nebo použijte `prune: never` pro zachování všech dat. +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. -- Není možné [roubovat](/subgraphs/cookbook/grafting/) na výšku bloku, který byl prořezán. Pokud se roubování provádí běžně a je požadováno prořezání, doporučuje se použít `indexerHints: prune: ` který přesně zachová stanovený počet bloků (např. dostatečný počet na šest měsíců). +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). ## Závěr From c2b33121aeb718dacd5669fcf8d4d01fdf1e247e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:06 -0500 Subject: [PATCH 0719/1534] New translations pruning.mdx (German) --- website/src/pages/de/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/de/subgraphs/cookbook/pruning.mdx b/website/src/pages/de/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/de/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/de/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From fa43d733a2476247ca179c372bd9db8c6d792764 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:07 -0500 Subject: [PATCH 0720/1534] New translations pruning.mdx (Italian) --- website/src/pages/it/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/it/subgraphs/cookbook/pruning.mdx b/website/src/pages/it/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/it/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/it/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From 922aff985d79275371be004751e96f67bcc74d73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:08 -0500 Subject: [PATCH 0721/1534] New translations pruning.mdx (Japanese) --- website/src/pages/ja/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ja/subgraphs/cookbook/pruning.mdx b/website/src/pages/ja/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/ja/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - 
Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From 2629b9e29e93e920cd3a702a1017f029d874cb87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:09 -0500 Subject: [PATCH 0722/1534] New translations pruning.mdx (Korean) --- website/src/pages/ko/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ko/subgraphs/cookbook/pruning.mdx b/website/src/pages/ko/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/ko/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From fa26fe82767043b80ba33fef95739a9b64d1c0b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:10 -0500 Subject: [PATCH 0723/1534] New translations pruning.mdx (Dutch) --- website/src/pages/nl/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/nl/subgraphs/cookbook/pruning.mdx b/website/src/pages/nl/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/nl/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From d41cb6dd58b29da50850b17f9f489863cc5a25be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:11 -0500 Subject: [PATCH 0724/1534] New translations pruning.mdx (Polish) --- website/src/pages/pl/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/pl/subgraphs/cookbook/pruning.mdx b/website/src/pages/pl/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/pl/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From 4d9764292954498ab7f11480957d39da00df8143 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:12 -0500 Subject: [PATCH 0725/1534] New translations pruning.mdx (Portuguese) --- .../src/pages/pt/subgraphs/cookbook/pruning.mdx | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/pruning.mdx b/website/src/pages/pt/subgraphs/cookbook/pruning.mdx index 5a3d1ac12a50..ffc706bcb26a 100644 --- a/website/src/pages/pt/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Boas Práticas de Subgraph 1 - Acelerar Queries com Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR @@ -14,7 +15,7 @@ O `indexerHints` tem três opções de `prune`: - `prune: auto`: Guarda o histórico mínimo necessário, conforme configurado pelo Indexador, para otimizar o desempenho dos queries. Esta é a configuração geralmente recomendada e é padrão para todos os subgraphs criados pela `graph-cli` >= 0.66.0. 
- `prune: `: Determina um limite personalizado no número de blocos históricos a serem retidos. -- `prune: never`: Nenhum pruning de dados históricos; guarda o histórico completo e é o padrão caso não haja uma secção `indexerHints`. O `prune: never` deve ser selecionado caso queira [Queries de Viagem no Tempo](/subgraphs/querying/graphql-api/#time-travel-queries). +- `prune: never`: Não será feito pruning de dados históricos; guarda o histórico completo, e é o padrão caso não haja uma secção `indexerHints`. `prune: never` deve ser selecionado caso queira [Queries de Viagem no Tempo](/subgraphs/querying/graphql-api/#time-travel-queries). Podemos adicionar `indexerHints` aos nossos subgraphs ao atualizar o nosso `subgraph.yaml`: @@ -40,16 +41,16 @@ dataSources: O pruning com `indexerHints` é uma boa prática para o desenvolvimento de subgraphs que oferece melhorias significativas no desempenho de queries. -## Subgraph Best Practices 1-6 +## Melhores Práticas para um Subgraph 1 – 6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Pruning: Reduza o Excesso de Dados do Seu Subgraph para Acelerar Queries](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Use o @derivedFrom para Melhorar a Resposta da Indexação e de Queries](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Melhore o Desempenho da Indexação e de Queries com o Uso de Bytes como IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Evite `eth-calls` para Acelerar a Indexação](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplifique e Otimize com Séries Temporais e Agregações](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Lance Hotfixes Mais Rápido com Enxertos](/subgraphs/cookbook/grafting-hotfix/) From 20e2ec7ff77e8cef7e98caeaa593050503ec271d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:13 -0500 Subject: [PATCH 0726/1534] New translations pruning.mdx (Russian) --- .../pages/ru/subgraphs/cookbook/pruning.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/pruning.mdx b/website/src/pages/ru/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..3cb2bcbf70f7 100644 --- a/website/src/pages/ru/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/pruning.mdx @@ -1,22 +1,23 @@ --- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +title: Лучшая практика субграфа 1 — Улучшение скорости запросов с помощью сокращения (Pruning) субграфа +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- -## TLDR +## Краткое содержание -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. 
+[Pruning](/developing/creating-a-subgraph/#prune) удаляет архивные элементы из базы данных субграфа до заданного блока, а удаление неиспользуемых элементов из базы данных субграфа улучшает производительность запросов, зачастую значительно. Использование `indexerHints` — это простой способ выполнить сокращение субграфа. -## How to Prune a Subgraph With `indexerHints` +## Как сократить субграф с помощью `indexerHints` -Add a section called `indexerHints` in the manifest. +Добавьте раздел с названием 'indexerHints' в манифест. -`indexerHints` has three `prune` options: +`indexerHints` имеет три опции `prune`: -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. -- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. +- `prune: auto`: Сохраняет минимально необходимую историю, установленную Индексатором, оптимизируя производительность запросов. Это рекомендуется как основная настройка и является настройкой по умолчанию для всех субграфов, созданных с помощью `graph-cli` версии >= 0.66.0. +- `prune: `: Устанавливает пользовательский предел на количество исторических блоков, которые следует сохранить. +- `prune: never`: без сокращения исторических данных; сохраняет всю историю и является значением по умолчанию, если раздел `indexerHints` отсутствует. `prune: never` следует выбрать, если требуются [Запросы на путешествия во времени](/subgraphs/querying/graphql-api/#time-travel-queries). -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: +Мы можем добавить `indexerHints` в наши субграфы, обновив наш файл `subgraph.yaml`: ```yaml specVersion: 1.0.0 @@ -30,26 +31,26 @@ dataSources: network: mainnet ``` -## Important Considerations +## Важные замечания -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. +- Если требуются [Запросы на путешествия во времени](/subgraphs/querying/graphql-api/#time-travel-queries) и при этом нужно выполнить сокращение данных, сокращение необходимо выполнить точно, чтобы сохранить функциональность запросов на путешествия во времени. По этой причине обычно не рекомендуется использовать `indexerHints: prune: auto` с запросами на путешествия во времени. Вместо этого следует выполнить сокращение, используя `indexerHints: prune: `, чтобы выполнить точное сокращение до высоты блока, которое сохранит исторические данные, необходимые для запросов на путешествия во времени, или использовать `prune: never`, чтобы сохранить все данные. -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. 
If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months).
+Невозможно выполнить [графтинг](/subgraphs/cookbook/grafting/) на высоте блока, который был сокращен. Если графтинг выполняется регулярно и требуется сокращение данных, рекомендуется использовать `indexerHints: prune: <Number of Blocks to Retain>`, чтобы точно сохранить необходимое количество блоков (например, достаточное для шести месяцев).

-## Conclusion
+## Заключение

-Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements.
+Сокращение с использованием `indexerHints` — это наилучшая практика при разработке субграфов, обеспечивающая значительное улучшение производительности запросов.

-## Subgraph Best Practices 1-6
+## Лучшие практики для субграфов 1-6

-1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+1. [Увеличение скорости запросов с помощью обрезки субграфов](/subgraphs/cookbook/pruning/)

-2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+2. [Улучшение индексирования и отклика запросов с использованием @derivedFrom](/subgraphs/cookbook/derivedfrom/)

-3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+3. [Улучшение индексирования и производительности запросов с использованием неизменяемых объектов и байтов в качестве идентификаторов](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)

-4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
+4. [Увеличение скорости индексирования путем избегания `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)

-5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/)
+5. [Упрощение и оптимизация с помощью временных рядов и агрегаций](/subgraphs/cookbook/timeseries/)

-6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/)
+6.
[Использование переноса (графтинга) для быстрого развертывания исправлений](/subgraphs/cookbook/grafting-hotfix/) From 9a982b766955bb2853247aee652ad1a30ad44497 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:14 -0500 Subject: [PATCH 0727/1534] New translations pruning.mdx (Swedish) --- website/src/pages/sv/subgraphs/cookbook/pruning.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/sv/subgraphs/cookbook/pruning.mdx b/website/src/pages/sv/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..cc66b8c7170b 100644 --- a/website/src/pages/sv/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- ## TLDR From 5da24266783bca0b5fdb791d6e1cc8bef6af6e8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:15 -0500 Subject: [PATCH 0728/1534] New translations pruning.mdx (Turkish) --- .../pages/tr/subgraphs/cookbook/pruning.mdx | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/pruning.mdx b/website/src/pages/tr/subgraphs/cookbook/pruning.mdx index c6b1217db9a5..f2525ffe92a5 100644 --- a/website/src/pages/tr/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/pruning.mdx @@ -1,22 +1,23 @@ --- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +title: Subgraph Örnek Uygulama 1 - Subgraph Budama ile Sorgu Hızını Artırın +sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints" --- -## TLDR +## Özet -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. +[Budama](/developing/creating-a-subgraph/#prune), bir subgraph'in veritabanından arşivlenmiş varlıkları istenilen bir bloka kadar kaldırır. Bir subgraph'in veritabanından kullanılmayan varlıkların kaldırılması, subgraph'in sorgu performansını genellikle kayda değer ölçüde artırır. `indexerHints` kullanmak, bir subgraph'i budamayı kolaylaştırır. -## How to Prune a Subgraph With `indexerHints` +## `indexerHints` ile Bir Subgraph'i Nasıl Budayabilirsiniz -Add a section called `indexerHints` in the manifest. +Manifestoya `indexerHints` adlı bir bölüm ekleyin. -`indexerHints` has three `prune` options: +`indexerHints` üç `prune` (budama) seçeneğine sahiptir: -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. -- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. +- `prune: auto`: Endeksleyici tarafından belirlenen asgari gerekli geçmişi koruyarak sorgu performansını optimize eder. Bu genellikle önerilen ayardır ve `graph-cli` >= 0.66.0 tarafından oluşturulan tüm subgraph'ler için varsayılandır. 
+- `prune: `: Korunacak olan geçmiş blokların sayısı için özel bir limit belirler.
+- `prune: never`: Geçmiş verilerin budanması yoktur; tüm geçmişi korur. `indexerHints` bölümü yoksa `prune: never` varsayılandır. [Zaman Yolculuğu Sorguları](/subgraphs/querying/graphql-api/#time-travel-queries) isteniyorsa `prune: never` seçilmelidir.

-We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`:
+`subgraph.yaml` dosyamızı güncelleyerek subgraph'lerimize `indexerHints` ekleyebiliriz:

```yaml
specVersion: 1.0.0
@@ -30,26 +31,26 @@ dataSources:
    network: mainnet
```

-## Important Considerations
+## Önemli Hususlar

-- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data.
+- [Zaman Yolculuğu Sorguları](/subgraphs/querying/graphql-api/#time-travel-queries) budama ile birlikte isteniyorsa, bu sorgular işlevselliğini korumak için budama doğru bir şekilde gerçekleştirilmelidir. Bu nedenle, genellikle `indexerHints: prune: auto`'yu Zaman Yolculuğu Sorguları ile kullanmak önerilmez. Bunun yerine, Zaman Yolculuğu Sorgularının gerektirdiği tarihsel verileri koruyan bir blok yüksekliğine doğru şekilde budamak için `indexerHints: prune: ` kullanın veya tüm veriyi korumak için `prune: never` kullanarak budama yapmamayı seçin.

-- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months).
+- Budanan bir blok yüksekliğinde [aşılama](/subgraphs/cookbook/grafting/) yapılamaz. Aşılama rutin olarak yapılıyorsa ve budama isteniyorsa, belirli bir blok sayısını (örneğin, altı ay yetecek kadar) doğru bir şekilde koruyacak `indexerHints: prune: ` ayarı kullanılması önerilir.

-## Conclusion
+## Sonuç

-Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements.
+`indexerHints` kullanarak budama, subgraph geliştirmesi için örnek uygulamadır ve sorgu performansında önemli iyileştirmeler sunar.

-## Subgraph Best Practices 1-6
+## Subgraph Örnek Uygulamalar 1-6

-1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+1. [Subgraph Budama ile Sorgu Hızını İyileştirin](/subgraphs/cookbook/pruning/)

-2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+2. [@derivedFrom Kullanarak Endeksleme ve Sorgu Yanıt Hızını Artırın](/subgraphs/cookbook/derivedfrom/)

-3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+3. [Değişmez Varlıklar ve Bytes ID'ler Kullanarak Endeksleme ve Sorgu Performansını Artırın](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)

-4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
+4. [Endeksleme Hızını `eth_calls`'den Kaçınarak İyileştirin](/subgraphs/cookbook/avoid-eth-calls/)

-5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/)
+5. [Zaman Serileri ve Bütünleştirme ile Basitleştirin ve Optimize Edin](/subgraphs/cookbook/timeseries/)

-6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/)
+6. [Hızlı Düzeltme Dağıtımı için Aşılama Kullanın](/subgraphs/cookbook/grafting-hotfix/)

From fcdca16903905f78a7b3c30c6d53a75e4f7b7ac6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:16 -0500
Subject: [PATCH 0729/1534] New translations pruning.mdx (Ukrainian)

---
 website/src/pages/uk/subgraphs/cookbook/pruning.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/uk/subgraphs/cookbook/pruning.mdx b/website/src/pages/uk/subgraphs/cookbook/pruning.mdx
index c6b1217db9a5..cc66b8c7170b 100644
--- a/website/src/pages/uk/subgraphs/cookbook/pruning.mdx
+++ b/website/src/pages/uk/subgraphs/cookbook/pruning.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning
+sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints"
 ---

 ## TLDR

From 82c0559f089faaad2d0fd8b03f1dc7c098dc65bf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:17 -0500
Subject: [PATCH 0730/1534] New translations pruning.mdx (Chinese Simplified)

---
 website/src/pages/zh/subgraphs/cookbook/pruning.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/zh/subgraphs/cookbook/pruning.mdx b/website/src/pages/zh/subgraphs/cookbook/pruning.mdx
index c6b1217db9a5..cc66b8c7170b 100644
--- a/website/src/pages/zh/subgraphs/cookbook/pruning.mdx
+++ b/website/src/pages/zh/subgraphs/cookbook/pruning.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning
+sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints"
 ---

 ## TLDR

From 950c30fcc69cfdabdcaa47ab2d2f8fae0dba70ce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:18 -0500
Subject: [PATCH 0731/1534] New translations pruning.mdx (Urdu (Pakistan))

---
 website/src/pages/ur/subgraphs/cookbook/pruning.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/ur/subgraphs/cookbook/pruning.mdx b/website/src/pages/ur/subgraphs/cookbook/pruning.mdx
index c6b1217db9a5..cc66b8c7170b 100644
--- a/website/src/pages/ur/subgraphs/cookbook/pruning.mdx
+++ b/website/src/pages/ur/subgraphs/cookbook/pruning.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning
+sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints"
 ---

 ## TLDR

From 6e85e1764644b513ebf70fba92c1277ade20c5f5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:19 -0500
Subject: [PATCH 0732/1534] New translations pruning.mdx (Vietnamese)

---
 website/src/pages/vi/subgraphs/cookbook/pruning.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/vi/subgraphs/cookbook/pruning.mdx b/website/src/pages/vi/subgraphs/cookbook/pruning.mdx
index c6b1217db9a5..cc66b8c7170b 100644
--- a/website/src/pages/vi/subgraphs/cookbook/pruning.mdx
+++ b/website/src/pages/vi/subgraphs/cookbook/pruning.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning
+sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints"
 ---

 ## TLDR

From cff7679eff1789bb6682a356b2503a944ea8cc24 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:20 -0500
Subject: [PATCH 0733/1534] New translations pruning.mdx (Marathi)

---
 website/src/pages/mr/subgraphs/cookbook/pruning.mdx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/src/pages/mr/subgraphs/cookbook/pruning.mdx b/website/src/pages/mr/subgraphs/cookbook/pruning.mdx
index c6b1217db9a5..cc66b8c7170b 100644
--- a/website/src/pages/mr/subgraphs/cookbook/pruning.mdx
+++ b/website/src/pages/mr/subgraphs/cookbook/pruning.mdx
@@ -1,5 +1,6 @@
 ---
 title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning
+sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints"
 ---

 ## TLDR

From b07afb2a060f75d16e1643927ac4e1f21077dbd7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:21 -0500
Subject: [PATCH 0734/1534] New translations pruning.mdx (Hindi)

---
 website/src/pages/hi/subgraphs/cookbook/pruning.mdx | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/website/src/pages/hi/subgraphs/cookbook/pruning.mdx b/website/src/pages/hi/subgraphs/cookbook/pruning.mdx
index f2ddfe9da791..74e1ee201228 100644
--- a/website/src/pages/hi/subgraphs/cookbook/pruning.mdx
+++ b/website/src/pages/hi/subgraphs/cookbook/pruning.mdx
@@ -1,5 +1,6 @@
 ---
 title: सबग्राफ बेस्ट प्रैक्टिस 1 - सबग्राफ प्रूनिंग के साथ क्वेरी की गति में सुधार करें
+sidebarTitle: "Subgraph Best Practice 1: Pruning with indexerHints"
 ---

 ## TLDR
@@ -14,7 +15,7 @@ indexerHints में तीन prune विकल्प होते हैं

 - prune: auto: आवश्यक न्यूनतम इतिहास को बनाए रखता है जैसा कि Indexer द्वारा निर्धारित किया गया है, जो क्वेरी प्रदर्शन को अनुकूलित करता है। यह सामान्यतः अनुशंसित सेटिंग है और यह सभी subgraphs के लिए डिफ़ॉल्ट है जो graph-cli >= 0.66.0 द्वारा बनाए गए हैं।
 - `prune: `: ऐतिहासिक ब्लॉकों को बनाए रखने की संख्या पर एक कस्टम सीमा निर्धारित करता है।
-- `prune: never`: ऐतिहासिक डेटा का कोई छंटाई नहीं; पूरी इतिहास को बनाए रखता है और यह डिफ़ॉल्ट है यदि indexerHints अनुभाग नहीं है। `prune: never` को तब चुना जाना चाहिए यदि Time Travel Queries (/subgraphs/querying/graphql-api/#time-travel-queries) की आवश्यकता हो।
+- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired.

 हम अपने 'subgraph' में indexerHints जोड़ सकते हैं हमारे subgraph.yaml को अपडेट करके:

@@ -32,9 +33,9 @@ dataSources:

 ## महत्वपूर्ण विचार

-- यदि Time Travel Queries(/subgraphs/querying/graphql-api/#time-travel-queries) की आवश्यकता हो और प्रूनिंग भी की जाए, तो Time Travel Query कार्यक्षमता बनाए रखने के लिए प्रूनिंग को सही ढंग से किया जाना चाहिए। इसलिए, Time Travel Queries के साथ indexerHints: prune: auto का उपयोग करना सामान्यतः अनुशंसित नहीं है। इसके बजाय, उस ब्लॉक ऊंचाई तक सही ढंग से प्रून करने के लिए `indexerHints: prune: ` का उपयोग करें जो Time Travel Queries के लिए आवश्यक ऐतिहासिक डेटा को बनाए रखता है, या prune: never का उपयोग करें ताकि सभी डेटा बनाए रखा जा सके।
+- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data.
-- यह 'grafting' (/subgraphs/cookbook/grafting/) को उस ब्लॉक ऊँचाई पर करना संभव नहीं है जो छंटनी (pruned) की गई है। यदि 'grafting' नियमित रूप से किया जाता है और छंटनी (pruning) की आवश्यकता है, तो यह अनुशंसा की जाती है कि `indexerHints: prune: ` का उपयोग किया जाए जो सटीक रूप से एक निश्चित संख्या में ब्लॉकों (जैसे, छह महीने के लिए पर्याप्त) को बनाए रखेगा।
+- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months).

 ## निष्कर्ष

From 1a95b3e5c157760ce8baac455441b1c04bf98c0b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:22 -0500
Subject: [PATCH 0735/1534] New translations secure-api-keys-nextjs.mdx (Romanian)

---
 .../src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index e37d83acbe78..fc7e0ff52eb4 100644
--- a/website/src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/ro/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon

 ### Conclusion

-By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further.
+By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further.

From 9b7ff456ce7d9d6462061a99ab918d5bf931d381 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:23 -0500
Subject: [PATCH 0736/1534] New translations secure-api-keys-nextjs.mdx (French)

---
 .../cookbook/secure-api-keys-nextjs.mdx       | 62 +++++++++----------
 1 file changed, 31 insertions(+), 31 deletions(-)

diff --git a/website/src/pages/fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index df8f8b2b28ea..cd3b3b46b7f9 100644
--- a/website/src/pages/fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/fr/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -1,48 +1,48 @@
 ---
-title: How to Secure API Keys Using Next.js Server Components
+title: Comment sécuriser les clés d'API en utilisant les composants serveur de Next.js
 ---

 ## Aperçu

-We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key).
+Nous pouvons utiliser [les composants serveur de Next.js](https://nextjs.org/docs/app/building-your-application/rendering/server-components) pour sécuriser correctement notre clé API contre l'exposition dans le frontend de notre dapp. Pour augmenter encore la sécurité de notre clé API, nous pouvons également [restreindre notre clé API à certains subgraphs ou domaines dans Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key).

-In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend.
+Dans ce guide pratique, nous allons passer en revue la création d'un composant de serveur Next.js qui interroge un subgraph tout en masquant la clé API du frontend.

-### Caveats
+### Mise en garde

-- Next.js server components do not protect API keys from being drained using denial of service attacks.
-- The Graph Network gateways have denial of service detection and mitigation strategies in place, however using server components may weaken these protections.
-- Next.js server components introduce centralization risks as the server can go down.
+- Les composants serveur de Next.js ne protègent pas les clés API contre les attaques de déni de service.
+- Les passerelles de The Graph Network disposent de stratégies de détection et d'atténuation des attaques de déni de service, cependant, l'utilisation des composants serveur peut affaiblir ces protections.
+- Les composants serveur de Next.js introduisent des risques de centralisation car le serveur peut tomber en panne.

-### Why It's Needed
+### Pourquoi est-ce nécessaire

-In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side.
+Dans une application React standard, les clés API incluses dans le code frontend peuvent être exposées du côté client, posant un risque de sécurité. Bien que les fichiers `.env` soient couramment utilisés, ils ne protègent pas complètement les clés car le code de React est exécuté côté client, exposant ainsi la clé API dans les headers. Les composants serveur Next.js résolvent ce problème en gérant les opérations sensibles côté serveur.

-### Using client-side rendering to query a subgraph
+### Utilisation du rendu côté client pour interroger un subgraph

-![Client-side rendering](/img/api-key-client-side-rendering.png)
+![rendu côté client](/img/api-key-client-side-rendering.png)

-### Conditions préalables
+### Prerequisites

-- An API key from [Subgraph Studio](https://thegraph.com/studio)
-- Basic knowledge of Next.js and React.
-- An existing Next.js project that uses the [App Router](https://nextjs.org/docs/app).
+- Une clé API provenant de [Subgraph Studio](https://thegraph.com/studio)
+- Une connaissance de base de Next.js et React.
+- Un projet Next.js existant qui utilise l'[App Router](https://nextjs.org/docs/app).

-## Step-by-Step Cookbook
+## Guide étape par étape

-### Step 1: Set Up Environment Variables
+### Étape 1 : Configurer les Variables d'Environnement

-1. In our Next.js project root, create a `.env.local` file.
-2. Add our API key: `API_KEY=`. +1. À la racine de notre projet Next.js, créer un fichier `.env.local` . +2. Ajouter notre clé API :: `API_KEY=`. -### Step 2: Create a Server Component +### Étape 2 : Créer un Composant Serveur -1. In our `components` directory, create a new file, `ServerComponent.js`. -2. Use the provided example code to set up the server component. +1. Dans notre répertoire`components` , créer un nouveau fichier, `ServerComponent.js`. +2. Utiliser le code exemple fourni pour configurer le composant serveur. -### Step 3: Implement Server-Side API Request +### Étape 3 : Implémenter la Requête API Côté Serveur -In `ServerComponent.js`, add the following code: +Dans `ServerComponent.js`, ajouter le code suivant : ```javascript const API_KEY = process.env.API_KEY @@ -95,10 +95,10 @@ export default async function ServerComponent() { } ``` -### Step 4: Use the Server Component +### Étape 4 : Utiliser le Composant Serveur -1. In our page file (e.g., `pages/index.js`), import `ServerComponent`. -2. Render the component: +1. Dans notre fichier de page (par exemple, `pages/index.js`), importer `ServerComponent`. +2. Rendu du composant: ```javascript import ServerComponent from './components/ServerComponent' @@ -112,12 +112,12 @@ export default function Home() { } ``` -### Step 5: Run and Test Our Dapp +### Étape 5 : Lancer et tester notre Dapp -Start our Next.js application using `npm run dev`. Verify that the server component is fetching data without exposing the API key. +Démarrez notre application Next.js en utilisant `npm run dev`. Vérifiez que le composant serveur récupère les données sans exposer la clé API. -![Server-side rendering](/img/api-key-server-side-rendering.png) +![Rendu côté serveur](/img/api-key-server-side-rendering.png) ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. 
From 17eac00f8d672465a001676efcb130c3c84328f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:24 -0500 Subject: [PATCH 0737/1534] New translations secure-api-keys-nextjs.mdx (Spanish) --- .../pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 2b5b9632ceb0..07b297aff006 100644 --- a/website/src/pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/es/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -22,7 +22,7 @@ In a standard React application, API keys included in the frontend code can be e ![Client-side rendering](/img/api-key-client-side-rendering.png) -### Prerrequisitos +### Prerequisites - An API key from [Subgraph Studio](https://thegraph.com/studio) - Basic knowledge of Next.js and React. @@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From 6dbc8993e95816c338ce646d4c99682cbb8f8c1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:25 -0500 Subject: [PATCH 0738/1534] New translations secure-api-keys-nextjs.mdx (Arabic) --- .../pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 23845a17520d..485f597e25ba 100644 --- a/website/src/pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -22,7 +22,7 @@ In a standard React application, API keys included in the frontend code can be e ![Client-side rendering](/img/api-key-client-side-rendering.png) -### المتطلبات الأساسية +### Prerequisites - An API key from [Subgraph Studio](https://thegraph.com/studio) - Basic knowledge of Next.js and React. @@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. 
+By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From f9893e4f27883d45a6fea3b51cbbdaed872db8ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:26 -0500 Subject: [PATCH 0739/1534] New translations secure-api-keys-nextjs.mdx (Czech) --- .../pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 12a504471cb7..de502a0ed526 100644 --- a/website/src/pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -22,7 +22,7 @@ Ve standardní aplikaci React mohou být klíče API obsažené v kódu frontend ![Client-side rendering](/img/api-key-client-side-rendering.png) -### Požadavky +### Prerequisites - Klíč API od [Subgraph Studio](https://thegraph.com/studio) - Základní znalosti Next.js a React. @@ -120,4 +120,4 @@ Spusťte naši aplikaci Next.js pomocí `npm run dev`. Ověřte, že serverová ### Závěr -Použitím serverových komponent Next.js jsme efektivně skryli klíč API před klientskou stranou, čímž jsme zvýšili bezpečnost naší aplikace. Tato metoda zajišťuje, že citlivé operace jsou zpracovávány na straně serveru, mimo potenciální zranitelnosti na straně klienta. Nakonec nezapomeňte prozkoumat [další opatření pro zabezpečení klíče API](/cookbook/upgrading-a-subgraph/#securing-your-api-key), abyste ještě více zvýšili zabezpečení svého klíče API. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From 0eb0d2ae54333990afd6f571ae01b8ac4ac19b19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:27 -0500 Subject: [PATCH 0740/1534] New translations secure-api-keys-nextjs.mdx (German) --- .../pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx index e37d83acbe78..4122439152b8 100644 --- a/website/src/pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/de/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -2,7 +2,7 @@ title: How to Secure API Keys Using Next.js Server Components --- -## Overview +## Überblick We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). 
@@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From 59f8d61f3034640aee97b46d3fb880f0f86963db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:28 -0500 Subject: [PATCH 0741/1534] New translations secure-api-keys-nextjs.mdx (Italian) --- .../src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 5c3778edb064..fba106e6eaf6 100644 --- a/website/src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/it/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. 
From 74342517d2d0059e4b91ca116ca27d9d8aac2a15 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:29 -0500
Subject: [PATCH 0742/1534] New translations secure-api-keys-nextjs.mdx (Japanese)

---
 .../pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/src/pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index ba4a9e799615..bac42648b0fc 100644
--- a/website/src/pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/ja/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -22,7 +22,7 @@ In a standard React application, API keys included in the frontend code can be e

 ![Client-side rendering](/img/api-key-client-side-rendering.png)

-### 前提条件
+### Prerequisites

 - An API key from [Subgraph Studio](https://thegraph.com/studio)
 - Basic knowledge of Next.js and React.
@@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon

 ### Conclusion

-By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further.
+By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further.

From 06c73bc825ef6e3e027065673c578537bce899b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:30 -0500
Subject: [PATCH 0743/1534] New translations secure-api-keys-nextjs.mdx (Korean)

---
 .../src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index e37d83acbe78..fc7e0ff52eb4 100644
--- a/website/src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/ko/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon

 ### Conclusion

-By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further.
+By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further.

From 37f8fe3853d21e6ad26678fee8c9c883c1bb6006 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:31 -0500
Subject: [PATCH 0744/1534] New translations secure-api-keys-nextjs.mdx (Dutch)

---
 .../src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index e37d83acbe78..fc7e0ff52eb4 100644
--- a/website/src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/nl/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon

 ### Conclusion

-By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further.
+By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further.

From 057a7ffcf2773ff85712d7879741217197ff10df Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:32 -0500
Subject: [PATCH 0745/1534] New translations secure-api-keys-nextjs.mdx (Polish)

---
 .../src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index e37d83acbe78..fc7e0ff52eb4 100644
--- a/website/src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/pl/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon

 ### Conclusion

-By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further.
+By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further.
From 7e9220f5c38198187123056b74c66728a319a24f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:33 -0500
Subject: [PATCH 0746/1534] New translations secure-api-keys-nextjs.mdx (Portuguese)

---
 .../pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/src/pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index 5bb8b017c0bc..768ee1418880 100644
--- a/website/src/pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/pt/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -22,7 +22,7 @@ Num aplicativo de React normal, chaves API incluídas no código do frontend pod

 ![Renderização client-side](/img/api-key-client-side-rendering.png)

-### Pré-requisitos
+### Prerequisites

 - Uma chave API do [Subgraph Studio](https://thegraph.com/studio)
 - Conhecimentos básicos de Next.js e React.
@@ -120,4 +120,4 @@ Inicie o nosso aplicativo Next.js com `npm run dev`. Verifique se o componente d

 ### Conclusão

-Ao utilizar os Componentes de Servidor Next.js, nós praticamente escondemos a chave API do client-side, o que aumenta a segurança do nosso aplicativo. Com este método, operações sensíveis podem ser executadas server-side, longe de vulnerabilidades em potencial no client-side. E finalmente, orientamos que explore [outras medidas de segurança de chave API](/cookbook/upgrading-a-subgraph/#securing-your-api-key) para aumentar ainda mais a sua segurança de chaves API.
+Ao utilizar os Componentes de Servidor Next.js, nós praticamente escondemos a chave API do client-side, o que aumenta a segurança do nosso aplicativo. Com este método, operações sensíveis podem ser executadas server-side, longe de vulnerabilidades em potencial no client-side. E finalmente, orientamos que explore [outras medidas de segurança de chave API](/subgraphs/querying/managing-api-keys/) para aumentar ainda mais a sua segurança de chaves API.

From d204da020b86ad71d125712e38c4d6a6fd12d49c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:34 -0500
Subject: [PATCH 0747/1534] New translations secure-api-keys-nextjs.mdx (Russian)

---
 .../cookbook/secure-api-keys-nextjs.mdx       | 64 +++++++++----------
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/website/src/pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index d0e6eca00359..963188b67823 100644
--- a/website/src/pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/ru/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -1,48 +1,48 @@
 ---
-title: How to Secure API Keys Using Next.js Server Components
+title: Как обезопасить API-ключи с использованием серверных компонентов Next.js
 ---

 ## Обзор

-We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key).
+Мы можем использовать [серверные компоненты Next.js](https://nextjs.org/docs/app/building-your-application/rendering/server-components), чтобы должным образом защитить наш API-ключ от взлома во внешнем интерфейсе нашего децентрализованного приложения (dapp). Чтобы дополнительно повысить безопасность API-ключа, мы также можем [ограничить доступ к нашему API-ключу для определённых субграфов или доменов в Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key).

-In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend.
+В этом руководстве мы рассмотрим, как создать серверный компонент Next.js, который запрашивает субграф, одновременно скрывая API-ключ от фронтенда.

-### Caveats
+### Предостережения

-- Next.js server components do not protect API keys from being drained using denial of service attacks.
-- The Graph Network gateways have denial of service detection and mitigation strategies in place, however using server components may weaken these protections.
-- Next.js server components introduce centralization risks as the server can go down.
+- Серверные компоненты Next.js не защищают API-ключи от утечки при атаках типа "отказ в обслуживании".
+- Шлюзы The Graph Network имеют стратегии обнаружения и смягчения атак типа "отказ в обслуживании", однако использование серверных компонентов может ослабить эти защиты.
+- Серверные компоненты Next.js вносят риски централизации, так как сервер может выйти из строя.

-### Why It's Needed
+### Почему это необходимо

-In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side.
+В стандартном React-приложении API-ключи, включённые в код внешнего интерфейса, могут быть раскрыты на стороне клиента, что создает угрозу безопасности. Хотя обычно используются файлы `.env`, они не обеспечивают полной защиты ключей, так как код React выполняется на стороне клиента, раскрывая API-ключ в заголовках. Серверные компоненты Next.js решают эту проблему, обрабатывая конфиденциальные операции на сервере.

-### Using client-side rendering to query a subgraph
+### Использование рендеринга на клиентской стороне для запроса к субграфу

-![Client-side rendering](/img/api-key-client-side-rendering.png)
+![Рендеринг на клиентской стороне](/img/api-key-client-side-rendering.png)

-### Предварительные требования
+### Prerequisites

-- An API key from [Subgraph Studio](https://thegraph.com/studio)
-- Basic knowledge of Next.js and React.
-- An existing Next.js project that uses the [App Router](https://nextjs.org/docs/app).
+- API-ключ от [Subgraph Studio](https://thegraph.com/studio)
+- Базовые знания о Next.js и React.
+- Существующий проект Next.js, который использует [App Router](https://nextjs.org/docs/app).

-## Step-by-Step Cookbook
+## Пошаговое руководство

-### Step 1: Set Up Environment Variables
+### Шаг 1: Настройка переменных среды

-1. In our Next.js project root, create a `.env.local` file.
-2. Add our API key: `API_KEY=`.
+1. В корневой папке нашего проекта Next.js создайте файл `.env.local`.
+2. Добавьте наш API-ключ: `API_KEY=`.

-### Step 2: Create a Server Component
+### Шаг 2: Создание серверного компонента

-1. In our `components` directory, create a new file, `ServerComponent.js`.
-2. Use the provided example code to set up the server component.
+1. В директории `components` создайте новый файл `ServerComponent.js`.
+2. Используйте приведённый пример кода для настройки серверного компонента.
-### Step 3: Implement Server-Side API Request
+### Шаг 3: Реализация API-запроса на стороне сервера

-In `ServerComponent.js`, add the following code:
+В файл `ServerComponent.js` добавьте следующий код:

```javascript
const API_KEY = process.env.API_KEY
@@ -95,10 +95,10 @@ export default async function ServerComponent() {
 }
```

-### Step 4: Use the Server Component
+### Шаг 4: Использование серверного компонента

-1. In our page file (e.g., `pages/index.js`), import `ServerComponent`.
-2. Render the component:
+1. В файл страницы (например, `pages/index.js`) импортируйте `ServerComponent`.
+2. Отрендерите компонент:

```javascript
import ServerComponent from './components/ServerComponent'
@@ -112,12 +112,12 @@ export default function Home() {
 }
```

-### Step 5: Run and Test Our Dapp
+### Шаг 5: Запуск и тестирование нашего децентрализованного приложения (Dapp)

-Start our Next.js application using `npm run dev`. Verify that the server component is fetching data without exposing the API key.
+Запустите наше приложение Next.js с помощью команды `npm run dev`. Убедитесь, что серверный компонент запрашивает данные, не раскрывая API-ключ.

-![Server-side rendering](/img/api-key-server-side-rendering.png)
+![Рендеринг на стороне сервера](/img/api-key-server-side-rendering.png)

-### Conclusion
+### Заключение

-By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further.
+Используя серверные компоненты Next.js, мы эффективно скрыли ключ API от клиентской стороны, улучшив безопасность нашего приложения. Этот метод гарантирует, что чувствительные операции обрабатываются на сервере, вдали от потенциальных уязвимостей на стороне клиента. В заключение, не забудьте ознакомиться с [другими мерами безопасности для ключей API](/subgraphs/querying/managing-api-keys/), чтобы повысить уровень безопасности своего ключа API.

From b12b70a506d31b7dc54f32310c3264f5d5bb001e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:35 -0500
Subject: [PATCH 0748/1534] New translations secure-api-keys-nextjs.mdx (Swedish)

---
 .../pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/src/pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index dfc61ce365d0..a9e82a6baa72 100644
--- a/website/src/pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/sv/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -22,7 +22,7 @@ In a standard React application, API keys included in the frontend code can be e

 ![Client-side rendering](/img/api-key-client-side-rendering.png)

-### Förutsättningar
+### Prerequisites

 - An API key from [Subgraph Studio](https://thegraph.com/studio)
 - Basic knowledge of Next.js and React.
@@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon

 ### Conclusion

-By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further.
+By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further.

From 2786fe2c224f733a8e4381b8b7fd5156e768446f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:57:36 -0500
Subject: [PATCH 0749/1534] New translations secure-api-keys-nextjs.mdx (Turkish)

---
 .../cookbook/secure-api-keys-nextjs.mdx       | 64 +++++++++----------
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx
index b3eff9269cea..50a3741afa22 100644
--- a/website/src/pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx
+++ b/website/src/pages/tr/subgraphs/cookbook/secure-api-keys-nextjs.mdx
@@ -1,48 +1,48 @@
 ---
-title: How to Secure API Keys Using Next.js Server Components
+title: Next.js Sunucu Bileşenlerini Kullanarak API Anahtarları Nasıl Güvenli Bir Şekilde Kullanılır
 ---

 ## Genel Bakış

-We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key).
+API anahtarımızı dapp'imizin ön yüzünde açığa çıkmasını düzgün bir şekilde engellemek için [Next.js sunucu bileşenlerini](https://nextjs.org/docs/app/building-your-application/rendering/server-components) kullanabiliriz. API anahtarımızın güvenliğini daha da artırmak için, ayrıca [API anahtarımızı belirli subgraph'lar veya Subgraph Studio'daki alanlarla sınırlandırabiliriz](/cookbook/upgrading-a-subgraph/#securing-your-api-key).

-In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend.
+Bu talimatlarda, bir subgraph'i sorgularken aynı zamanda API anahtarını ön yüzden gizleyen bir Next.js sunucu bileşeni oluşturmayı ele alacağız.

-### Caveats
+### Kısıtlamalar

-- Next.js server components do not protect API keys from being drained using denial of service attacks.
-- The Graph Network gateways have denial of service detection and mitigation strategies in place, however using server components may weaken these protections.
-- Next.js server components introduce centralization risks as the server can go down.
+- Next.js sunucu bileşenleri, servis dışı bırakma saldırıları ile API anahtarlarının boşaltılmasına karşı koruma sağlamaz.
+- The Graph Ağ geçitleri, servis dışı bırakma saldırı tespiti ve saldırıyı hafifletme stratejilerine sahiptir. Ancak sunucu bileşenlerini kullanmak bu korumaları zayıflatabilir.
+- Next.js sunucu bileşenleri, sunucunun çökmesi ihtimali dolayısıyla merkezileşme riskleri taşır.
-### Why It's Needed +### Neden Gerekli -In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. +Standart bir React uygulamasında, ön yüz koduna dahil edilen API anahtarları istemci tarafında açığa çıkabilir ve güvenlik riski oluşturabilir. `.env` dosyaları yaygın olarak kullanılsa da React kodu istemci tarafında çalıştığı için anahtarları tam olarak korumazlar ve API anahtarı başlıklarda açığa çıkar. Next.js Sunucu Bileşenleri bu sorunu, hassas işlemleri sunucu tarafında yürüterek çözer. -### Using client-side rendering to query a subgraph +### Bir subgraph'i sorgulamak için istemci tarafında işleme (render) -![Client-side rendering](/img/api-key-client-side-rendering.png) +![İstemci tarafında işleme](/img/api-key-client-side-rendering.png) -### Ön Koşullar +### Prerequisites -- An API key from [Subgraph Studio](https://thegraph.com/studio) -- Basic knowledge of Next.js and React. -- An existing Next.js project that uses the [App Router](https://nextjs.org/docs/app). +- [Subgraph Studio](https://thegraph.com/studio)'dan bir API anahtarı +- Temel Next.js ve React bilgisi. +- [Uygulama Yönlendiricisi](https://nextjs.org/docs/app) kullanan mevcut bir Next.js projesi. -## Step-by-Step Cookbook +## Adım Adım Talimatlar -### Step 1: Set Up Environment Variables +### Adım 1: Ortam Değişkenlerini Ayarlayın -1. In our Next.js project root, create a `.env.local` file. -2. Add our API key: `API_KEY=`. +1. Next.js projemizin kök dizininde `.env.local` dosyası oluşturun. +2. API anahtarımızı ekleyin: `API_KEY=`. -### Step 2: Create a Server Component +### Adım 2: Bir Sunucu Bileşeni Oluşturma -1. In our `components` directory, create a new file, `ServerComponent.js`. -2. Use the provided example code to set up the server component. +1. `components` dizinimizde "ServerComponent.js" adında yeni bir dosya oluşturun. +2. Sunucu bileşenini kurmak için sağlanan örnek kodu kullanın. -### Step 3: Implement Server-Side API Request +### Adım 3: Sunucu Tarafı API İsteğini Gerçekleştirin -In `ServerComponent.js`, add the following code: +`ServerComponent.js`'e aşağıdaki kodu ekleyin: ```javascript const API_KEY = process.env.API_KEY @@ -95,10 +95,10 @@ export default async function ServerComponent() { } ``` -### Step 4: Use the Server Component +### Adım 4: Sunucu Bileşenini Kullanın -1. In our page file (e.g., `pages/index.js`), import `ServerComponent`. -2. Render the component: +1. Sayfa dosyamızda (örneğin, `pages/index.js`), `ServerComponent`'ı içe aktarın. +2. Bileşeni işleyin: ```javascript import ServerComponent from './components/ServerComponent' @@ -112,12 +112,12 @@ export default function Home() { } ``` -### Step 5: Run and Test Our Dapp +### Adım 5: Dapp'imizi Çalıştırın ve Test Edin -Start our Next.js application using `npm run dev`. Verify that the server component is fetching data without exposing the API key. +`npm run dev` komutunu kullanarak Next.js uygulamamızı başlatın. Sunucu bileşeninin API anahtarını açığa çıkarmadan veri çektiğini doğrulayın. 
-![Server-side rendering](/img/api-key-server-side-rendering.png) +![Sunucu-taraflı işleme](/img/api-key-server-side-rendering.png) -### Conclusion +### Sonuç -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +Next.js Sunucu Bileşenlerini kullanarak, API anahtarını istemci tarafında gizlemeyi başardık ve bu da uygulamamızın güvenliğini artırdı. Bu yöntem, hassas işlemlerin potansiyel istemci-taraflı güvenlik açıklıklarından uzak bir şekilde sunucu tarafında ele alındığını garanti eder. Son olarak, API anahtar güvenliğinizi daha da artırmak için [diğer API anahtar güvenlik önlemlerini](/subgraphs/querying/managing-api-keys/) incelemeyi unutmayın. From 4e2367bcaeb58bb44dfc3e70c9cd7c1784e337f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:37 -0500 Subject: [PATCH 0750/1534] New translations secure-api-keys-nextjs.mdx (Ukrainian) --- .../src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx index e37d83acbe78..fc7e0ff52eb4 100644 --- a/website/src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. 
From bf95efcef461097473eba8c7e62e02f781810664 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:38 -0500 Subject: [PATCH 0751/1534] New translations secure-api-keys-nextjs.mdx (Chinese Simplified) --- .../pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 00e49384691c..ae2201109356 100644 --- a/website/src/pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -22,7 +22,7 @@ In a standard React application, API keys included in the frontend code can be e ![Client-side rendering](/img/api-key-client-side-rendering.png) -### 先决条件 +### Prerequisites - An API key from [Subgraph Studio](https://thegraph.com/studio) - Basic knowledge of Next.js and React. @@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From 56c7a3f2a34cf7a055825277e8a3ef7de5b34491 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:39 -0500 Subject: [PATCH 0752/1534] New translations secure-api-keys-nextjs.mdx (Urdu (Pakistan)) --- .../pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx index f5735d5627c3..132c4cd07884 100644 --- a/website/src/pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -22,7 +22,7 @@ In a standard React application, API keys included in the frontend code can be e ![Client-side rendering](/img/api-key-client-side-rendering.png) -### شرطیں +### Prerequisites - An API key from [Subgraph Studio](https://thegraph.com/studio) - Basic knowledge of Next.js and React. @@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. 
+By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From 06d555e3c40b2364adc3639298f861dde6cc4ff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:40 -0500 Subject: [PATCH 0753/1534] New translations secure-api-keys-nextjs.mdx (Vietnamese) --- .../pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx index 7c0461051202..e83414fea5e5 100644 --- a/website/src/pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -22,7 +22,7 @@ In a standard React application, API keys included in the frontend code can be e ![Client-side rendering](/img/api-key-client-side-rendering.png) -### Điều kiện tiên quyết +### Prerequisites - An API key from [Subgraph Studio](https://thegraph.com/studio) - Basic knowledge of Next.js and React. @@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From 3baafa9aae3ad13c4f78f6a8b9eb1f344b6a58c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:42 -0500 Subject: [PATCH 0754/1534] New translations secure-api-keys-nextjs.mdx (Marathi) --- .../pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx index d4e672df4fdb..d5ff1b146dfd 100644 --- a/website/src/pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -22,7 +22,7 @@ In a standard React application, API keys included in the frontend code can be e ![Client-side rendering](/img/api-key-client-side-rendering.png) -### पूर्वतयारी +### Prerequisites - An API key from [Subgraph Studio](https://thegraph.com/studio) - Basic knowledge of Next.js and React. @@ -120,4 +120,4 @@ Start our Next.js application using `npm run dev`. 
Verify that the server compon ### Conclusion -By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. From 44675441de81e4d86bdc1e1e5c539cddf2c6ab41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:43 -0500 Subject: [PATCH 0755/1534] New translations secure-api-keys-nextjs.mdx (Hindi) --- .../pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx b/website/src/pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx index cce3d8339979..a3cea804e850 100644 --- a/website/src/pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/secure-api-keys-nextjs.mdx @@ -22,9 +22,9 @@ title: कैसे सुरक्षित करें API Keys का उप ![Client-side rendering](/img/api-key-client-side-rendering.png) -### आवश्यक शर्तें +### Prerequisites -- [Subgraph Studio](https://thegraph.com/studio) से एक API कुंजी +- [Subgraph Studio](https://thegraph.com/studio) से एक API कुंजी - Next.js और React का बुनियादी ज्ञान - एक मौजूदा Next.js प्रोजेक्ट जो App Router (https://nextjs.org/docs/app). का उपयोग करता है। @@ -120,4 +120,4 @@ export default function Home() { ### निष्कर्ष -Next.js Server Components का उपयोग करके, हमने प्रभावी रूप से API कुंजी को क्लाइंट-साइड से छिपा दिया है, जिससे हमारे एप्लिकेशन की सुरक्षा में वृद्धि हुई है। यह विधि सुनिश्चित करती है कि संवेदनशील संचालन सर्वर-साइड पर, संभावित क्लाइंट-साइड कमजोरियों से दूर, संभाले जाते हैं। अंत में, अपने API कुंजी की सुरक्षा बढ़ाने के लिए other API key security measures (/cookbook/upgrading-a-subgraph/#securing-your-api-key) का अन्वेषण करना न भूलें। +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/subgraphs/querying/managing-api-keys/) to increase your API key security even further. 
From f3dbafdde7dd2e630b3e7a3400d874e0ac5995b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:45 -0500 Subject: [PATCH 0756/1534] New translations subgraph-debug-forking.mdx (French) --- .../cookbook/subgraph-debug-forking.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/fr/subgraphs/cookbook/subgraph-debug-forking.mdx index a8292c5b0d86..cedcf3ece5c4 100644 --- a/website/src/pages/fr/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -6,9 +6,9 @@ As with many systems processing large amounts of data, The Graph's Indexers (Gra ## D'accord, qu'est-ce que c'est ? -**Subgraph forking** est le processus qui consiste à récupérer paresseusement des entités du magasin d'un _autre_ subgraph (généralement un magasin distant). +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -Dans le contexte du débogage, un **subgraph fork** vous permet de déboguer votre subgraph défaillant au bloc _X_ sans avoir à attendre la synchronisation avec le bloc _X_. +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## Quoi ? Comment ? @@ -18,9 +18,9 @@ In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph N ## S'il vous plaît, montrez-moi du code ! -Pour rester concentré sur le débogage des subgraphs, gardons les choses simples et exécutons le [exemple-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexant le contrat intelligent Ethereum Gravity. +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -Voici les gestionnaires définis pour l'indexation des `Gravatar`s, exempts de tout bogue : +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -53,9 +53,9 @@ La méthode habituelle pour tenter de résoudre le problème est la suivante : 3. Attendez qu’il soit synchronisé. 4. S'il se casse à nouveau, revenez au point 1, sinon : Hourra ! -C'est en effet assez familier avec un processus de débogage ordinaire, mais il y a une étape qui ralentit horriblement le processus : _3. Attendez qu'il se synchronise._ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -En utilisant **le forçage de subgraphs**, nous pouvons essentiellement éliminer cette étape. Voici à quoi cela ressemble : +Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Apportez une modification à la source des mappings qui, selon vous, résoudra le problème. @@ -69,14 +69,14 @@ Maintenant, vous pouvez avoir 2 questions : Je réponds : -1. `fork-base` est l'URL "de base", de sorte que lorsque l'_id du subgraph_ est ajouté, l'URL résultante (`/`) est un point de terminaison GraphQL valide pour le magasin du subgraph. +1. 
`fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. Fourcher est facile, pas besoin de transpirer : ```bash -$ graph deploy --debug-fork -id> ; --ipfs http://localhost:5001 --node http://localhost:8020 +$ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -N'oubliez pas non plus de définir le champ `dataSources.source.startBlock` dans le manifeste du subgraph au numéro du bloc problématique, afin de ne pas indexer les blocs inutiles et de profiter du fork ! +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Voici donc ce que je fais : @@ -90,12 +90,12 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. Après une inspection minutieuse, je remarque qu'il y a un décalage dans les représentations des `identifiants` utilisés lors de l'indexation des `Gravatars` dans mes deux gestionnaires. Alors que `handleNewGravatar` le convertit en hexadécimal (`event.params.id.toHex()`), `handleUpdatedGravatar` utilise un int32 (`event.params.id.toI32()`), ce qui provoque la panique de `handleUpdatedGravatar` avec "Gravatar not found!". J'ai fait en sorte que les deux convertissent l'`identifiant` en hexadécimal. +2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. 3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` -4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +4. J'inspecte les logs générés par le Graph Node local et, Hourra!, tout semble fonctionner. +5. Je déploie mon subgraph, désormais débarrassé de tout bug, sur un Graph Node distant et vis heureux pour toujours ! (Malheureusement pas de patates, mais c’est la vie…) From e10e2ab2e18ae33da89c4b95176d4d7a4ba8fabe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:46 -0500 Subject: [PATCH 0757/1534] New translations subgraph-debug-forking.mdx (Spanish) --- .../cookbook/subgraph-debug-forking.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/es/subgraphs/cookbook/subgraph-debug-forking.mdx index 39633c0fbb49..163a16d59e00 100644 --- a/website/src/pages/es/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/es/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -6,9 +6,9 @@ As with many systems processing large amounts of data, The Graph's Indexers (Gra ## ¿Bien, qué es? 
-**Subgraph forking** es el proceso de obtener entidades de _otro_ almacén de subgrafos (por lo general uno remoto). +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -En el contexto de la depuración, **subgraph forking** te permite depurar tu subgrafo fallido en el bloque _X_ sin necesidad de esperar a sincronizar para bloquear _X_. +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## ¡¿Qué?! ¿Cómo? @@ -18,9 +18,9 @@ In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph N ## ¡Por favor, muéstrame algo de código! -Para mantenernos enfocados en el debugging de subgrafos, simplifiquemos las cosas y sigamos con el [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) que indexa el contrato inteligente Ethereum Gravity. +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -Estos son los handlers definidos para indexar `Gravatar`s, sin errores de ningún tipo: +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -53,9 +53,9 @@ La forma habitual de intentar una solución es: 3. Espera a que se sincronice. 4. Si se vuelve a romper vuelve a 1, de lo contrario: ¡Hurra! -De hecho, es bastante familiar para un proceso de depuración ordinario, pero hay un paso que ralentiza terriblemente el proceso: _3. Espera a que se sincronice._ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Usando **bifurcación de subgrafo** podemos eliminar esencialmente este paso. Así es como se ve: +Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Realiza un cambio en la fuente de mapeos, que crees que resolverá el problema. @@ -69,14 +69,14 @@ Ahora, puedes tener 2 preguntas: Y yo respondo: -1. `fork-base` es la URL "base", de modo que cuando se agrega el _id de subgrafo_, la URL resultante (`/`) es un punto final de GraphQL válido para la tienda del subgrafo. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. Bifurcar es fácil, no hay necesidad de sudar: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Además, no olvides configurar el campo `dataSources.source.startBlock` en el manifiesto del subgrafo con el número del bloque problemático, para que puedas omitir la indexación de bloques innecesarios y aprovechar la bifurcación! +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Entonces, esto es lo que hago: @@ -90,7 +90,7 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. 
Después de una inspección cuidadosa, noté que hay una falta de coincidencia en las representaciones de `id` utilizadas al indexar `Gravatar` en mis dos handlers. Mientras que `handleNewGravatar` lo convierte en hexadecimal (`event.params.id.toHex()`), `handleUpdatedGravatar` usa un int32 (`event. params.id.toI32()`) que hace que `handleUpdatedGravatar` entre en pánico con "¡Gravatar no encontrado!". Hago que ambos conviertan el `id` en un hexadecimal. +2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. 3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash From 7499dcf8f5af9400f7891f5da43cb25748497b88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:47 -0500 Subject: [PATCH 0758/1534] New translations subgraph-debug-forking.mdx (Arabic) --- .../cookbook/subgraph-debug-forking.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ar/subgraphs/cookbook/subgraph-debug-forking.mdx index 6c87d43045c5..3bacc1f60003 100644 --- a/website/src/pages/ar/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -6,9 +6,9 @@ As with many systems processing large amounts of data, The Graph's Indexers (Gra ## حسنا، ما هو؟ -**Subgraph forking** هي عملية جلب الكيانات بشكل lazily من مخزن subgraph _آخر_ (عادةً ما يكون بعيدًا). +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -يسمح لك **subgraph forking** بتصحيح أخطاء الـ subgraph الفاشل في الكتلة(block) _ X _ دون الحاجة للانتظار للمزامنة للكتلة _ X _. +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## ماذا؟! كيف؟ @@ -20,7 +20,7 @@ In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph N To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -فيما يلي المعالجات المعرفة لفهرسة `Gravatar` ، بدون أخطاء على الإطلاق: +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -53,9 +53,9 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St 3. الانتظار حتى تتم المزامنة. 4. إذا حدثت المشكلة مرة أخرى ، فارجع إلى 1! -كما ترى ، فهده الطريقة تشبه عملية تصحيح الأخطاء العادية ، ولكن هناك خطوة واحدة تؤدي إلى إبطاء العملية بشكل رهيب: _ 3. الانتظار حتى تتم المزامنة. _ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -باستخدام **subgraph forking** يمكننا التخلص من تلك الخطوة. إليك كيف يبدو: +Using **subgraph forking** we can essentially eliminate this step. 
Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. قم بإجراء تغيير في مصدر الـ mappings ، والذي تعتقد أنه سيحل المشكلة. @@ -69,14 +69,14 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St وأنا أجيب: -1. `fork-base` هو عنوان URL "الأساسي" ،فمثلا عند إلحاق _subgraph id_ ، يكون عنوان URL الناتج (`/`) هو GraphQL endpoint صالح لمخزن الـ subgraph. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. الـتفريع سهل ، فلا داعي للقلق: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -أيضًا ، لا تنس تعيين حقل `dataSources.source.startBlock` في subgraph manifest لرقم الكتلة(block) التي بها المشكلة، حتى تتمكن من تخطي فهرسة الكتل الغير ضرورية والاستفادة من التفريع! +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! لذلك ، هذا ما أفعله: @@ -90,7 +90,7 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. بعد فحص دقيق ، لاحظت أن هناك عدم تطابق في تمثيلات الـ `id` المستخدمة عند فهرسة `Gravatar` في المعالجين الخاصين بي. بينما `handleNewGravatar` يحول (`event.params.id.toHex()`) إلى سداسي ، `handleUpdatedGravatar` يستخدم int32 (`event.params.id.toI32()`) مما يجعل `handleUpdatedGravatar` قلقا من "Gravatar not found!". أنا أجعلهم كلاهما يحولان `id` إلى سداسي. +2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. 3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash From e03237d0cc8d6e0ca959323f44d59fc08f62cea1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:48 -0500 Subject: [PATCH 0759/1534] New translations subgraph-debug-forking.mdx (Czech) --- .../cookbook/subgraph-debug-forking.mdx | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/cs/subgraphs/cookbook/subgraph-debug-forking.mdx index 84df907a9602..4673b362c360 100644 --- a/website/src/pages/cs/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,25 +2,25 @@ title: Rychlé a snadné ladění podgrafů pomocí vidliček --- -Stejně jako u mnoha systémů zpracovávajících velké množství dat může indexerům grafu (Graph Nodes) trvat poměrně dlouho, než synchronizují váš podgraf s cílovým blockchainem. Nesoulad mezi rychlými změnami za účelem ladění a dlouhými čekacími dobami potřebnými pro indexaci je extrémně kontraproduktivní a jsme si toho dobře vědomi. To je důvod, proč představujeme **rozvětvování podgrafů**, vyvinutý společností [LimeChain](https://limechain.tech/), a v tomto článku Ukážu vám, jak lze tuto funkci použít k podstatnému zrychlení ladění podgrafů! 
+As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! ## Ok, co to je? -**podgraf vidličkování** je proces líného načítání entit z _úložiště jiného_ subgrafu (obvykle vzdáleného). +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -V kontextu ladění vám ** vidličkování podgrafů** umožňuje ladit neúspěšný podgraf v bloku _X_, aniž byste museli čekat k synchronizaci zablokovat _X_. +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## Co?! Jak? -Když nasadíte podgraf do vzdáleného uzlu Graf pro indexování a ten selže v bloku _X_, dobrou zprávou je, že uzel Graf bude stále obsluhovat dotazy GraphQL pomocí svého úložiště, které je synchronizováno s blokem _X_. To je skvělé! To znamená, že můžeme využít tohoto "aktuálního" úložiště k opravě chyb vznikajících při indexování bloku _X_. +When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. -Stručně řečeno, _rozvětvíme neúspěšný podgraf_ ze vzdáleného uzlu grafu, u kterého je zaručeno, že podgraf bude indexován až do bloku _X_, abychom lokálně nasazenému podgrafu laděnému v bloku _X_ poskytli aktuální pohled na stav indexování. +In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. ## Ukažte mi prosím nějaký kód! -Abychom se soustředili na ladění podgrafů, zůstaňme u jednoduchých věcí a projděme si [příkladový podgraf](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexující inteligentní kontrakt Ethereum Gravity. +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -Zde jsou definovány obslužné programy pro indexování `Gravatarů` bez jakýchkoli chyb: +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, jak nešťastné, když jsem nasadil můj perfektně vypadající podgraf do [Podgraf Studio](https://thegraph.com/studio/), selhalo to s chybou _"Gravatar nenalezen!"_. +Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. Obvyklý způsob, jak se pokusit o opravu, je: 1. 
Proveďte změnu ve zdroji mapování, která podle vás problém vyřeší (zatímco já vím, že ne). -2. Znovu nasaďte podgraf do [Subgraph Studio](https://thegraph.com/studio/) (nebo jiného vzdáleného uzlu Graf). +2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). 3. Počkejte na synchronizaci. 4. Pokud se opět rozbije, vraťte se na 1, jinak: Hurá! -Je to skutečně docela podobné běžnému procesu ladění, ale existuje jedna fáze, která ho strašně zpomaluje: _3. Počkejte, až se synchronizuje._ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -Pomocí **vidličkování podgrafů** můžeme tento krok v podstatě eliminovat. Takto to vypadá: +Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. Proveďte změnu ve zdroji mapování, která podle vás problém vyřeší. -2. Nasazení do místního uzlu Graf, **_forking selhávajícího podgrafu_** a **_zahájení od problematického bloku_**. +2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. 3. Pokud se opět rozbije, vraťte se na 1, jinak: Hurá! Nyní můžete mít 2 otázky: @@ -69,18 +69,18 @@ Nyní můžete mít 2 otázky: A já odpovídám: -1. `fork-base` je "base" adresa URL, takže když je připojeno _identifikátor podgrafu_, výsledná adresa URL (`/`) je platným koncovým bodem GraphQL pro úložiště podgrafu. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. Vidličkování je snadné, není třeba se potit: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Nezapomeňte také nastavit pole `dataSources.source.startBlock` v manifestu podgrafu na číslo problematického bloku, abyste mohli vynechat indexování nepotřebných bloků a využít výhod vidličkování! +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! Takže to dělám takhle: -1. Spustím místní uzel Graf ([zde je návod, jak to udělat](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) s volbou `fork-base` nastavenou na: `https://api.thegraph.com/subgraphs/id/`, protože budu forkovat podgraf, ten chybný, který jsem nasadil dříve, z [Podgraph Studio](https://thegraph.com/studio/). +1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). ``` $ cargo run -p graph-node --release -- \ @@ -90,8 +90,8 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. Po pečlivém prozkoumání si všímám, že existuje nesoulad v reprezentacích `id`, které se používají při indexaci `Gravatar` v mých dvou obslužných funkcích. Zatímco `handleNewGravatar` ho převede na hex (`event.params.id.toHex()`), `handleUpdatedGravatar` používá int32 (`event.params.id.toI32()`), což způsobuje, že `handleUpdatedGravatar` selže s chybou "Gravatar nenalezen!". Udělám, aby obě převedly `id` na hex. -3. 
Po provedení změn jsem nasadil svůj podgraf do místního uzlu Graf, **_rozvětveníl selhávající podgraf_** a nastavil `dataSources.source.startBlock` na `6190343` v `subgraph.yaml`: +2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. +3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 From dc32d398e94214683bfede60e329c87fd259c04a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:50 -0500 Subject: [PATCH 0760/1534] New translations subgraph-debug-forking.mdx (Japanese) --- .../cookbook/subgraph-debug-forking.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ja/subgraphs/cookbook/subgraph-debug-forking.mdx index a18cf73d17b7..7d4e4d6a6e6f 100644 --- a/website/src/pages/ja/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -6,9 +6,9 @@ As with many systems processing large amounts of data, The Graph's Indexers (Gra ## さて、それは何でしょうか? -**サブグラフのフォーク**とは、*他*のサブグラフのストア(通常はリモート) からエンティティをフェッチするプロセスです。 +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -デバッグの文脈では、**サブグラフのフォーク**により、ブロック*X*への同期を待つことなく、ブロック*X*で失敗したサブグラフのデバッグを行うことができます。 +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## その方法は? @@ -18,9 +18,9 @@ In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph N ## コードを見てみましょう -サブグラフのデバッグに集中し続けるために、物事をシンプルにして、Ethereum Gravity スマート コントラクトのインデックスを作成する [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) を実行してみましょう。 +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -以下は、`Gravatar`のインデックスを作成するために定義されたハンドラで、バグが全くありません。 +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -53,9 +53,9 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St 3. 同期を待つ 4. 再び問題が発生した場合は、1に戻る -このように、通常のデバッグ処理とほぼ同じですが、1つだけ、処理を恐ろしく遅くするステップがあります:_3. 同期を待つ_ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -**サブグラフのフォーク**を利用することで、このステップを実質的に省略することができます。その方法は次の通りです: +Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. 
マッピングのソースを変更し、問題を解決する @@ -69,14 +69,14 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St 回答: -1. `fork-base`は「ベース」URLで、*subgraph id*が追加されたときのURL (`/`) はサブグラフのストアに対する有効な GraphQL endpoint であることを示します。 +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. フォーキングは簡単であり煩雑な手間はありません ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -また、サブグラフマニフェストの`dataSources.source.startBlock`フィールドを問題のあるブロックの番号に設定することを忘れないでください。そうすれば、不要なブロックのインデックス作成を省略して、フォークを利用することができます。 +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! そこで、以下の通りです: @@ -90,7 +90,7 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. よく調べてみると、2つのハンドラで `Gravatar` をインデックスする際に使用される `id` 表現にミスマッチがあることに気づきました。`handleNewGravatar` はそれを hex (`event.params.id.toHex()`) に変換しますが、`handleUpdatedGravatar` は int32 (`event.params.id.toI32()`) を使用するので `handleUpdatedGravatar` は "Gravatar not found!" でパニックになってしまうのです。両方とも`id`を16進数に変換するようにしています。 +2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. 3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash From 56609d75c475bbae47049457e314650e97e1cfe9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:52 -0500 Subject: [PATCH 0761/1534] New translations subgraph-debug-forking.mdx (Portuguese) --- .../cookbook/subgraph-debug-forking.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/pt/subgraphs/cookbook/subgraph-debug-forking.mdx index 44e8e379599d..405699684aef 100644 --- a/website/src/pages/pt/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -2,25 +2,25 @@ title: Debugging de Subgraphs Rápido e Fácil Com Forks --- -Assim como vários sistemas que processam uma abundância de dados, os Indexadores do Graph (Graph Nodes) podem demorar um pouco para sincronizar o seu subgraph com a blockchain de destino. A discrepância entre mudanças rápidas para fins de debugging e os longos tempos de espera necessários para o indexing é extremamente contraprodutiva, e nós sabemos muito bem disso. É por isso que introduzimos o **subgraph forking**, desenvolvido pela [LimeChain](https://limechain.tech/); neste artigo, veja como é possível acelerar bastante o debugging de subgraphs! +Assim como vários sistemas que processam uma abundância de dados, os Indexadores do Graph (Graph Nodes) podem demorar um pouco para sincronizar o seu subgraph com a blockchain de destino. 
A discrepância entre mudanças rápidas para fins de debugging e os longos tempos de espera necessários para o indexing é extremamente contraprodutiva, e nós sabemos muito bem disso. É por isso que introduzimos o **forking de subgraphs**, programado pela [LimeChain](https://limechain.tech/); neste artigo. Veja como dá para acelerar bastante o debugging de subgraphs! ## Ok, o que é isso? -**Subgraph forking** é o processo de retirar entidades tranquilamente do armazenamento de _outro_ subgraph (sendo muitas vezes remoto). +**Forking de subgraphs** é o processo de retirar entidades tranquilamente do armazenamento de _outro_ subgraph (normalmente, remoto). -No contexto do debugging, o **subgraph forking** permite debugar o seu subgraph falho no bloco _X_ sem precisar esperar que ele sincronize até o bloco _X_. +No contexto do debugging, o **forking de subgraphs** permite debugar o seu subgraph falho no bloco _X_ sem precisar esperar que ele sincronize até o bloco _X_. ## O quê?! Como?! -Quando lanças um subgraph a um Graph Node remoto para o indexing e ele falha no bloco _X_, a boa notícia é que o Graph Node ainda servirá queries GraphQL com seu armazenamento, que é sincronizado até o bloco _X_. Ótimo! Podemos aproveitar este armazenamento "atualizado" para consertar os bugs que surgem ao indexar o bloco _X_. +Quando um subgraph é implementado a um Graph Node remoto para indexação, e ele falha no bloco _X_, a boa notícia é que o Graph Node ainda servirá queries GraphQL com seu armazenamento, que é sincronizado até o bloco _X_. Ótimo! Podemos aproveitar este armazenamento "atualizado" para consertar os bugs que surgem ao indexar o bloco _X_. -Resumindo, _faremos um fork do subgraph falho_ de um Graph Node remoto para garantir que o subgraph seja indexado até o bloco _X_, para fornecer ao subgraph lançado localmente uma visão atualizada do estado da indexação, sendo este debugado no bloco _X_. +Resumindo, faremos um fork do subgraph falho de um Graph Node remoto para garantir que o subgraph seja indexado até o bloco _X_, para fornecer ao subgraph implantado localmente uma visão atualizada do estado da indexação; sendo este debugado no bloco _X_. ## Por favor, quero ver códigos! -Para manter o foco no debugging dos subgraphs, vamos simplificar as coisas e seguir o [exemplo de subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) com a indexação do contrato inteligente do Ethereum Gravity. +Para manter a concentração no debugging de subgraphs, vamos começar com coisas simples: siga o [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) a indexar o contrato inteligente Ethereum Gravity. -Aqui estão os handlers definidos para o indexamento dos `Gravatar`s, sem qualquer bug: +Aqui estão os handlers definidos para a indexação dos `Gravatars`, sem qualquer bug: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Que pena! Quando eu lanço o meu lindo subgraph ao [Subgraph Studio](https://thegraph.com/studio/), ele falha com o erro _"Gravatar not found"_ (Gravatar não encontrado). +Que pena! Quando eu implanto o meu lindo subgraph no Subgraph Studio, ele falha com o erro "Gravatar not found" (Gravatar não encontrado). A maneira mais comum de tentar consertar este erro é: 1. Fazer uma mudança na fonte dos mapeamentos, que talvez possa resolver o problema (mas é claro que não vai). -2. 
Reeditar o subgraph ao [Subgraph Studio](https://thegraph.com/studio/) (ou outro Graph Node remoto). +2. Implante o subgraph novamente no [Subgraph Studio](https://thegraph.com/studio/) (ou outro Graph Node remoto). 3. Esperar que ele se sincronize. 4. Se quebrar novamente, volte ao passo 1. Se não: Eba! -É um típico processo ordinário de debug, mas há um passo que retarda muito o processo: _3. Esperar que ele se sincronize._ +É um típico processo ordinário de debug, mas há um passo que atrasa muito o processo: _3. Esperar que ele se sincronize._ -Com o **subgraph forking**, podemos essencialmente eliminar este passo. Ele é algo assim: +Com o **forking de subgraphs**, essencialmente, podemos eliminar este passo. É mais ou menos assim: 0. Crie um Graph Node local com o conjunto de **_fork-base apropriado_**. 1. Faça uma mudança na fonte dos mapeamentos, que talvez possa resolver o problema. -2. Lance ao Graph Node local, **_faça um fork do subgraph falho_** e **_comece do bloco problemático_**. +2. Implante-o no Graph Node local, **faça um fork do subgraph falho**, e **comece do bloco problemático_**. 3. Se quebrar novamente, volte ao passo 1. Se não: Eba! Agora, você deve ter duas perguntas: @@ -69,7 +69,7 @@ Agora, você deve ter duas perguntas: E eu respondo: -1. `fork-base` é o URL "base", tal que quando o _subgraph id_ é atrelado, o URL resultante (`/`) se torna um endpoint GraphQL válido para o armazenamento do subgraph. +1. `fork-base` é o URL "base", tal que quando a _id de subgraph_ é atrelada, o URL resultante (`/`) se torna um ponto final GraphQL válido para o armazenamento do subgraph. 2. Forking é fácil, não precisa se preocupar: ```bash @@ -90,8 +90,8 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. Após vistoriar com cuidado, percebo uma discrepância nas representações de `id` usadas ao indexar `Gravatars` nos meus dois handlers. Enquanto `handleNewGravatar` o converte a um hex (`event.params.id.toHex()`), o `handleUpdatedGravatar` usa um int32 (`event.params.id.toI32()`). Assim, o `handleUpdatedGravatar` entra em pânico com o "Gravatar não encontrado!". Eu faço os dois converterem o `id` em um hex. -3. Após ter feito as mudanças, lanço o meu subgraph ao Graph Node local, **_forkando o subgraph falho_** e programando o `dataSources.source.startBlock` em `6190343` no `subgraph.yaml`: +2. Após vistoriar com cuidado, percebo uma discrepância nas representações de `id` usadas ao indexar `Gravatar`s nos meus dois handlers. Enquanto `handleNewGravatar` o converte a um hex (`event.params.id.toHex()`), o `handleUpdatedGravatar` usa um int32 (`event.params.id.toI32()`). Assim, o `handleUpdatedGravatar` entra em pânico com o "Gravatar não encontrado!". Eu faço os dois converterem o `id` em um hex. +3. 
Após ter feito as mudanças, implanto o meu subgraph no Graph Node local, **_fazendo um fork do subgraph falho_** e programando o `dataSources.source.startBlock` em `6190343` no `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 From d2f8cc869ccde8df22df498b874369114c23e928 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:53 -0500 Subject: [PATCH 0762/1534] New translations subgraph-debug-forking.mdx (Russian) --- .../cookbook/subgraph-debug-forking.mdx | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ru/subgraphs/cookbook/subgraph-debug-forking.mdx index 614980481ab6..8f2e67289d77 100644 --- a/website/src/pages/ru/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -1,26 +1,26 @@ --- -title: Быстрая и простая отладка подграфа с использованием форков +title: Быстрая и простая отладка субграфа с использованием форков --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +Как и многие системы, обрабатывающие большие объемы данных, Индексаторы The Graph (Graph Nodes) могут занять достаточно много времени для синхронизации Вашего субграфа с целевым блокчейном. Несоответствие между быстрыми изменениями, направленными на отладку, и долгим временем ожидания, необходимым для индексирования, является крайне непродуктивным, и мы прекрасно осознаем эту проблему. Поэтому мы представляем **форкинг субграфа**, разработанный [LimeChain](https://limechain.tech/), и в этой статье я покажу, как эту функцию можно использовать для значительного ускорения отладки субграфов! ## И так, что это? -**Subgraph forking** - это процесс ленивой выборки объектов из _ хранилища другого _ подграфа (обычно удаленного). +**Форкинг субграфа** — это процесс ленивой загрузки объектов из _другого_ хранилища субграфа (обычно удалённого). -В контексте отладки **форкинга подграфа** позволяет вам отлаживать ваш проблемный подграф в блоке _X_ без необходимости ждать синхронизации с блоком _X_. +В контексте отладки **форкинг субграфа** позволяет Вам отлаживать Ваш неудавшийся субграф на блоке _X_, не дожидаясь синхронизации с блоком _X_. ## Что? Как? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +Когда Вы развертываете субграф на удалённой Graph Node для индексирования, и он терпит неудачу на блоке _X_, хорошая новость заключается в том, что Graph Node всё равно будет обслуживать запросы GraphQL, используя своё хранилище, которое синхронизировано с блоком _X_. Это здорово! 
Таким образом, мы можем воспользоваться этим "актуальным" хранилищем, чтобы исправить ошибки, возникающие при индексировании блока _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +Короче говоря, мы собираемся _форкать неработающий субграф_ с удалённой Graph Node, которая гарантированно имеет индексированный субграф до блока _X_, чтобы предоставить локально развернутому субграфу, который отлаживается на блоке _X_, актуальное состояние индексирования. ## Пожалуйста, покажите мне какой-нибудь код! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +Чтобы сосредоточиться на отладке субграфа, давайте упростим задачу и продолжим с [примером субграфа](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar), который индексирует смарт-контракт Ethereum Gravity. -Вот обработчики, установленные для индексирования `Gravatar`s, совершенно без ошибок: +Вот обработчики, определённые для индексирования `Gravatar`, без каких-либо ошибок: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -44,43 +44,43 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Ой, как неприятно! Когда я развертываю свой идеально выглядящий субграф в [Subgraph Studio](https://thegraph.com/studio/), он выдаёт ошибку _"Gravatar not found!"_. Обычный способ попытаться исправить это: -1. Внести изменения в источник сопоставлений, которые, по вашему мнению, решат проблему (в то время как я знаю, что это не так). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +1. Внести изменения в источник мэппингов, которые, по Вашему мнению, решат проблему (в то время как я знаю, что это не так). +2. Перезапустить развертывание своего субграфа в [Subgraph Studio](https://thegraph.com/studio/) (или на другую удалённую Graph Node). 3. Подождать, пока он синхронизируется. 4. Если он снова сломается, вернуться к пункту 1, в противном случае: Ура! -Это действительно довольно знакомо для обычного процесса отладки, но есть один шаг, который ужасно замедляет процесс: _3. Подождите, пока он синхронизируется._ +Действительно, это похоже на обычный процесс отладки, но есть один шаг, который ужасно замедляет процесс: _3. Ждите, пока завершится синхронизация._ -Используя **форк подграфа**, мы можем существенно исключить этот шаг. Вот как это выглядит: +Используя **форк субграфа**, мы можем фактически устранить этот шаг. Вот как это выглядит: -0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. -1. Внесите изменения в источник сопоставлений, которые, по вашему мнению, решат проблему. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +0. Запустите локальную Graph Node с помощью **_соответстсвующего набора fork-base_**. +1. Внесите изменения в источник мэппингов, которые, по Вашему мнению, решат проблему. +2. 
Произведите развертывание на локальной Graph Node, **_форкнув неудачно развернутый субграф_** и **_начав с проблемного блока_**.
 3. Если он снова сломается, вернитесь к пункту 1, в противном случае: Ура!

-Сейчас у вас может появиться 2 вопроса:
+Сейчас у Вас может появиться 2 вопроса:

-1. fork-base что???
+1. fork-base - что это???
 2. Форкнуть кого?!

 И я вам отвечаю:

-1. `fork-base` - это "базовый" URL, такой, что при добавлении _идентификатора подграфа_ результирующий URL (`<fork-base>/<subgraph-id>`) является допустимым эндпоинтом GraphQL для хранилища подграфа.
-2. Форк - это просто, особо не нужно потеть:
+1. `fork-base` - это «базовый» URL, при добавлении которого к _subgraph id_ результирующий URL (`<fork-base>/<subgraph-id>`) является действительной конечной точкой GraphQL для хранилища субграфа.
+2. Форкнуть легко, не нужно напрягаться:

 ```bash
 $ graph deploy <subgraph-name> --debug-fork <subgraph-id> --ipfs http://localhost:5001 --node http://localhost:8020
 ```

-Кроме того, не забудьте установить в поле `DataSources.source.startBlock` в манифесте подграфа номер проблемного блока, чтобы вы могли пропустить индексацию ненужных блоков и воспользоваться преимуществами fork!
+Также не забудьте установить поле `dataSources.source.startBlock` в манифесте субграфа на номер проблемного блока, чтобы пропустить индексирование ненужных блоков и воспользоваться преимуществами форка!

 Итак, вот что я делаю:

-1. I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/).
+1. Я запускаю локальную Graph Node ([вот как это сделать](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) с опцией `fork-base`, установленной в: `https://api.thegraph.com/subgraphs/id/`, поскольку я буду форкать субграф, тот самый, который я ранее развертывал, с [Subgraph Studio](https://thegraph.com/studio/).

 ```
 $ cargo run -p graph-node --release -- \
@@ -90,12 +90,12 @@ $ cargo run -p graph-node --release -- \
 --fork-base https://api.thegraph.com/subgraphs/id/
 ```

-2. После тщательной проверки я замечаю, что существует несоответствие в представлениях `id`, используемых при индексации `Gravatar`s в моих двух обработчиках. В то время как `handleNewGravatar` преобразует его в шестнадцатеричное значение (`event.params.id.toHex()`), `handleUpdatedGravatar` использует int32 (`event.params.id.toI32()`), который вызывает `handleUpdatedGravatar` панику с сообщением "Gravatar not found!". Я заставляю их обоих преобразовать `идентификатор` в шестнадцатеричное значение.
-3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`:
+2. После тщательной проверки я замечаю, что существует несоответствие в представлениях `id`, используемых при индексировании `Gravatar` в двух моих обработчиках. В то время как `handleNewGravatar` конвертирует его в hex (`event.params.id.toHex()`), `handleUpdatedGravatar` использует int32 (`event.params.id.toI32()`), что приводит к тому, что `handleUpdatedGravatar` завершается ошибкой и появляется сообщение "Gravatar not found!". Я заставляю оба обработчика конвертировать `id` в hex.
+3. 
После внесения изменений я развертываю свой субграф на локальной Graph Node, **выполняя форк неудавшегося субграфа** и устанавливаю значение `dataSources.source.startBlock` равным `6190343` в файле `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` -4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +4. Я проверяю логи, созданные локальной Graph Node, и, ура!, кажется, все работает. +5. Я развертываю свой теперь свободный от ошибок субграф на удаленной Graph Node и живу долго и счастливо! (но без картошки) From 3cf28230c7c0e8ec152a165dd0d90c324a2442de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:54 -0500 Subject: [PATCH 0763/1534] New translations subgraph-debug-forking.mdx (Swedish) --- .../cookbook/subgraph-debug-forking.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/sv/subgraphs/cookbook/subgraph-debug-forking.mdx index c9d3e5c2eb3e..aee8ecf8791f 100644 --- a/website/src/pages/sv/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -6,9 +6,9 @@ As with many systems processing large amounts of data, The Graph's Indexers (Gra ## Ok, vad är det? -**Subgraf forking** är processen att lätt hämta entiteter från _en annan_ subgrafs butik (vanligtvis en avlägsen sådan). +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -I samband med felsökning låter **subgraf forking** dig felsöka din misslyckade subgraf i block _X_ utan att behöva vänta för att synkronisera för att blockera _X_. +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## Vad?! Hur? @@ -18,9 +18,9 @@ In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph N ## Snälla, visa mig lite kod! -För att behålla fokus på subgraffelsökning, låt oss hålla saker och ting enkla och köra tillsammans med [exempel-undergraf](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexera Ethereum Gravity smarta kontrakt. +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -Här är hanterarna definierade för att indexera `Gravatar`s, utan några som helst buggar: +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -53,9 +53,9 @@ Det vanliga sättet att försöka fixa är: 3. Vänta tills det synkroniseras. 4. Om den går sönder igen gå tillbaka till 1, annars: Hurra! -Det är faktiskt ganska bekant med en vanlig felsökningsprocess, men det finns ett steg som saktar ner processen fruktansvärt: _3. Vänta tills det synkroniseras._ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. 
Wait for it to sync-up._

-Genom att använda **subgraf forking** kan vi i princip eliminera detta steg. Så här ser det ut:
+Using **subgraph forking** we can essentially eliminate this step. Here is how it looks:

 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set.
 1. Gör en ändring i mappningskällan som du tror kommer att lösa problemet.
 2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**.
 3. Om den går sönder igen gå tillbaka till 1, annars: Hurra!

 Nu kanske du har 2 frågor:

 1. fork-base vad???
 2. Gaffling vem?!

 Och jag svarar:

-1. `fork-base` är "bas"-URL, så att när _subgraf id_ läggs till den resulterande URL-adressen (`<fork-base>/<subgraph-id>`) är en giltig GraphQL slutpunkt för subgrafens arkiv.
+1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`<fork-base>/<subgraph-id>`) is a valid GraphQL endpoint for the subgraph's store.
 2. Gaffling är lätt, du behöver inte svettas:

 ```bash
 $ graph deploy <subgraph-name> --debug-fork <subgraph-id> --ipfs http://localhost:5001 --node http://localhost:8020
 ```

-Glöm inte heller att ställa in `dataSources.source.startBlock`-fältet i undergraf manifestet till numret på det problematiska blocket, så att du kan hoppa över indexering av onödiga block och dra fördel av gaffeln!
+Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork!

 Så här är vad jag gör:

@@ -90,7 +90,7 @@ $ cargo run -p graph-node --release -- \
 --fork-base https://api.thegraph.com/subgraphs/id/
 ```

-2. Efter noggrann inspektion märker jag att det finns en oöverensstämmelse i `id`-representationerna som används vid indexering av `Gravatar`s i mina två hanterare. Medan `handleNewGravatar` konverterar den till en hex (`event.params.id.toHex()`), använder `handleUpdatedGravatar` en int32 (`händelse. params.id.toI32()`) vilket gör att `handleUpdatedGravatar` får panik med "Gravatar not found!". Jag får dem båda att konvertera `id` till en hexadecimal.
+2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex.
 3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`:

```bash

From 1e2fdee3a534815e328886881abf97caf7394f49 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= 
Date: Fri, 14 Feb 2025 12:57:55 -0500
Subject: [PATCH 0764/1534] New translations subgraph-debug-forking.mdx
 (Turkish)

---
 .../cookbook/subgraph-debug-forking.mdx       | 42 +++++++++----------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/tr/subgraphs/cookbook/subgraph-debug-forking.mdx
index 121af4c5a33e..d739d1aee6d6 100644
--- a/website/src/pages/tr/subgraphs/cookbook/subgraph-debug-forking.mdx
+++ b/website/src/pages/tr/subgraphs/cookbook/subgraph-debug-forking.mdx
@@ -2,25 +2,25 @@
 title: Fork Kullanarak Hızlı ve Kolay Subgraph Hata Ayıklama
 ---
 
-As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. 
The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +Büyük miktarda veri işleyen birçok sistemde olduğu gibi, The Graph'ın Endeksleyicilerinin (Graph Düğümlerinin), subgraph'inizi hedef blokzinciri ile senkronize etmesi ciddi ölçüde uzun sürebilir. Hata ayıklama amacıyla yapılan hızlı değişiklikler ile endeksleme için gereken uzun bekleme süreleri arasındaki uyumsuzluk son derece verimsiz olmaktadır. Bunun kesinlikle farkındayız. Bu yüzden, [LimeChain](https://limechain.tech/) tarafından geliştirilen **subgraph çatallama**yı sunuyoruz. Bu makalede size bu özelliğin subgraph hata ayıklamayı önemli ölçüde hızlandırmak için nasıl kullanılabileceğini göstereceğim! ## Peki, nedir bu Subgraph Forklama? -**Subgraph forklama**, _başka_ bir subgraph'ın deposundan(genellikle uzaktaki birinden) unsurları yavaş bir şekilde getirme işlemidir. +**Subgraph çatallama**, _başka bir_ subgraph'in mağazasından (genellikle uzak bir mağaza) tembel bir şekilde öge çekme işlemidir. -Hata ayıklama bağlamında, **subgraph forklama**, _X_ bloğunda başarısız olan subgraph'ınızda yine aynı _X_ bloğunun senkronize olmasını beklemeksizin hata ayıklamanıza olanak tanır. +Hata ayıklama bağlamında, **subgraph çatallama** başarısız olmuş subgraph'i, _X_ blokuna kadar senkronize olmasını beklemeden hata ayıklamanıza olanak tanır. ## Ne?! Nasıl? -When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +Bir subgraph'i uzaktaki bir Graph Düğümüne endekslemek amacıyla dağıttığınızda subgraph _X_ blokunda çalışmayı durdurabilir. İyi haber ise Graph Düğümü, _X_ blokuna kadar senkronize olmuş deposunu kullanarak GraphQL sorgularını yerine getiriyor olacaktır. Bu harika bir haber! Bu durum, endeksleme sırasında _X_ blokunda ortaya çıkan hataları düzeltmek için bu "güncel" depodan faydalanabileceğimiz anlamına gelir. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +Özetle, _çalışmayı durdurmuş bir subgraph'i_, _X_ blokuna kadar endekslenmesi garanti edilen uzak bir Graph Düğümünden _çatallayacağız_. Böylece _X_ blokunda hatası ayıklanan yerel olarak dağıtılmış subgraph'in endeksleme durumunu gösteren güncel bir görünüm sağlamış olacağız. ## Lütfen bana biraz kod göster! -Subgraph hata ayıklamasında konsantrasyonu bozmamak adına işleri basit tutalım ve Ethereum Gravity akıllı sözleşmesini indeksleyen [subgraph örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) ile ilerleyelim. +Subgraph hata ayıklamalarına odaklanmak için işleri basitleştirelim ve Ethereum Gravatar akıllı sözleşmesini endeksleyen [örnek-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) üzerinde çalışalım. 
-Burada hiç hata olmadan `Gravatar`ları indekslemek için tanımlanan işleyiciler şunlardır: +`Gravatar`'ları endekslemek için tanımlanan, hiçbir hata içermeyen işleyiciler şunlardır: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -44,22 +44,22 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. +Eyvah, ne talihsiz bir durum, mükemmel görünen subgraph'imi [Subgraph Studio](https://thegraph.com/studio/) üzerinde dağıttığımda _"Gravatar bulunamadı!"_ hatası ile çalışmayı durduruyor. Genellikle düzeltmeyi denemek için yol şudur: 1. Eşleştirme kaynağında, sorunu çözeceğine inandığınız bir değişiklik yapın (ama ben çözmeyeceğini biliyorum). -2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). +2. Subgraph'i [Subgraph Studio](https://thegraph.com/studio/) (veya başka bir uzak Graph Düğümü) üzerinde yeniden dağıtın. 3. Senkronize olması için bekleyin. 4. Tekrar sorunla karşılaşırsanız 1. aşamaya geri dönün, aksi takdirde: Yaşasın! -Bu, sıradan bir hata ayıklama işlemine gerçekten oldukça benzerdir, fakat işlemi korkunç şekilde yavaşlatan bir adım vardır: _3. Senkronize olması için bekleyin._ +Bu gerçekten sıradan bir hata ayıklama sürecine oldukça benzemektedir, ancak süreci korkunç derecede yavaşlatan bir adım vardır: _3. Senkronize olmasını bekleyin._ -Aslında **subgraph forklama** kullanarak bu adımı ortadan kaldırabiliriz. Nasıl göründüğüne bakalım: +**Subgraph çatallama** kullanarak bu adımı ortadan kaldırabiliriz. Aşağıda bu işlemi görebilirsiniz: -0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. +0. **_Uygun çatal-temeli (fork-base)_** ayarlanmış yerel bir Graph Düğümü başlatın. 1. Eşleştirme kaynağında, sorunu çözeceğine inandığınız bir değişiklik yapın. -2. Deploy to the local Graph Node, **_forking the failing subgraph_** and **_starting from the problematic block_**. +2. Çalışmayı durduran subgraph'i **_çatallayarak_** ve **_sorunlu bloktan başlayarak_** yerel Graph Düğümüne dağıtın. 3. Tekrar sorunla karşılaşırsanız 1. aşamaya geri dönün, aksi takdirde: Yaşasın! Şimdi, 2 sorunuz olabilir: @@ -69,18 +69,18 @@ Aslında **subgraph forklama** kullanarak bu adımı ortadan kaldırabiliriz. Na Ve ben cevap veriyorum: -1. `fork temelli (fork-base)` subgraph deposu için geçerli bir GraphQL uç noktası oluşturacak şekilde _subgraph kimliği(id)_ eklendiğinde oluşan URL'ye (`/`) eklenen "temel" bir URL'dir. +1. `fork-base`, "temel" URL'dir, böylece devamına _subgraph id_ eklendiğinde oluşan URL (`/`) subgraph'in depolaması için geçerli bir GraphQL uç noktası olur. 2. Forklama kolay, ter dökmeye gerek yok: ```bash -$ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 +$ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Ayrıca, subgraph manifestindeki `dataSources.source.startBlock` alanını sorunlu bloğun numarasına ayarlamayı unutmayın, böylece gereksiz blokları indekslemeyi geçebilir ve forklamanın avantajından yararlanabilirsiniz! +Ayrıca, ihtiyaç olmayan blokları endekslemeyi atlamak ve çatallamanın avantajlarından yararlanmak için `subgraph` manifesto dosyasındaki `dataSources.source.startBlock` alanını sorunlu blokun numarası olarak ayarlamayı unutmayın! İşte benim ne yaptığım: -1. 
I spin-up a local Graph Node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from [Subgraph Studio](https://thegraph.com/studio/). +1. Lokal bir Graph Düğümü başlatıyorum ([nasıl yapılacağını buradan öğrenebilirsiniz](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) ve `fork-base` seçeneğini şu şekilde ayarlıyorum: `https://api.thegraph.com/subgraphs/id/`, çünkü daha önce [Subgraph Studio](https://thegraph.com/studio/)dan dağıttığım hatalı `subgraph`i çatallayacağım. ``` $ cargo run -p graph-node --release -- \ @@ -90,12 +90,12 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. Dikkatli bir inceleme sonrasında, iki işleyicimdeki `Gravatar`ları indekslerken kullanılan `kimlik(id)` temsillerinde bir uyuşmazlık oldupunu fark ettim. `handleNewGravatar` onu bir 16'lık sisteme (`event.params.id.toHex()`) dönüştürürken, `handleUpdatedGravatar`, `handleUpdatedGravatar`'ın "Gravatar not found!" hatası vermesine neden olan bir int32 (`event.params.id.toI32()`) kullanır. Her ikisinde de `kimliğin` 16'lık sisteme dönüştürülmesini sağlarım. -3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +2. Dikkatli bir incelemeden sonra, iki işleyicimde `Gravatar`'ları endekslerken kullanılan `id` temsillerinde bir uyumsuzluk olduğunu fark ettim. `handleNewGravatar` onu bir hex'e dönüştürürken (`event.params.id.toHex()`), `handleUpdatedGravatar` bir int32 (`event.params.id.toI32()`) kullanıyor, bu da `handleUpdatedGravatar`'ın "Gravatar not found!" hatasını vermesine neden oluyor. İkisini de `id`'yi hex'e dönüştürecek şekilde düzenledim. +3. Değişiklikleri yaptıktan sonra, **_hatalı subgraph'i çatallayarak_** ve `subgraph.yaml` dosyasında `dataSources.source.startBlock` değerini `6190343` olarak ayarlayarak subgraph'imi yerel Graph Düğümü'ne dağıttım: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` -4. I inspect the logs produced by the local Graph Node and, Hooray!, everything seems to be working. -5. I deploy my now bug-free subgraph to a remote Graph Node and live happily ever after! (no potatoes tho) +4. Yerel Graph Düğümü tarafından üretilen günlükleri inceliyorum ve yaşasın! Her şey yolunda görünüyor. +5. Artık hatasız olan `subgraph`imi uzak bir Graph Düğümü'nde dağıtıyorum ve sonsuza kadar mutlu yaşıyorum! 
From 7ad802adfc5db3cc8cd34f510edb8990bd165100 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:56 -0500 Subject: [PATCH 0765/1534] New translations subgraph-debug-forking.mdx (Chinese Simplified) --- .../cookbook/subgraph-debug-forking.mdx | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/zh/subgraphs/cookbook/subgraph-debug-forking.mdx index 9407cd0d670a..7eef54e247ea 100644 --- a/website/src/pages/zh/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -4,23 +4,23 @@ title: 使用分叉快速轻松地调试子图 As with many systems processing large amounts of data, The Graph's Indexers (Graph Nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! -## 首先,让我们来看什么是子图分叉 +## 好的,那是什么? -**子图分叉** 是从*另一个* 子图的存储(通常是远程存储)中缓慢获取实体的过程。 +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -在调试时,**subgraph forking** 允许您在固定的区块 _X_ 中调试失败的子图,而无需等待区块同步 _X_。 +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. -## 让我们看看这是如何做到的? +## 什么?! 如何处理? When you deploy a subgraph to a remote Graph Node for indexing and it fails at block _X_, the good news is that the Graph Node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph Node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. -## 下面让我们看一下代码示例 +## 请给我看一些代码! -为了专注于子图调试,让我们保持简单,并与索引 Ethereum Gravity 智能合约的 [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) 一起运行。 +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -以下是索引 `Gravatar` 定义的处理程序,没有任何错误: +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -46,16 +46,16 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph Studio](https://thegraph.com/studio/) it fails with the _"Gravatar not found!"_ error. -尝试修复的常用方法是: +The usual way to attempt a fix is: -1. 在映射源中进行更改,你认为这将解决问题(但我知道它不会)。 +1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). 2. Re-deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) (or another remote Graph Node). -3. 等待同步。 -4. 如果它再次中断,则返回 第1步,否则:搞定! +3. 
Wait for it to sync-up. +4. If it breaks again go back to 1, otherwise: Hooray! -对于一个普通的调试过程来说很常见,但是有一个步骤会严重减缓这个过程:_3。 等待同步。_ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -使用 **子图分叉** 我们可以从根本上解决这个问题。 如下: +Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. 按照你认为可以解决问题的方法,在映射源中进行更改。 @@ -69,14 +69,14 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St 回答如下: -1. `子分叉集` 是“基础”URL,例如将 _子图 id_ 添加到结果 URL (`<子分叉集>/`) ,就是一个合法的子图GraphQL访问端口。 +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. 分叉容易,不要紧张: ```bash -$ graph 部署 --调试分叉 --ipfs地址 http://localhost:5001 --node http://localhost:8020 +$ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -另外,不要忘记将子图中的 `dataSources.source.startBlock` 字段设置为有问题的区块编号,这样您就可以跳过索引不必要的区块并利用分叉! +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! 所以,我是这么做的: @@ -90,7 +90,7 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. 经过仔细检查,我注意到在我的两个处理程序中索引 `Gravatar` 时使用的 `id` 不匹配。 `handleNewGravatar` 将其转换为十六进制 (`event.params.id.toHex()`),而 `handleUpdatedGravatar` 使用 int32格式 (`event. params.id.toI32()`) 这会导致 `handleUpdatedGravatar` 出现“未找到 Gravatar!”的错误。 于是我将他们都 `id` 转换为十六进制。 +2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. 3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash From 630dfda78dd86005080a72aa11918064f7b389d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:57 -0500 Subject: [PATCH 0766/1534] New translations subgraph-debug-forking.mdx (Urdu (Pakistan)) --- .../cookbook/subgraph-debug-forking.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/ur/subgraphs/cookbook/subgraph-debug-forking.mdx index 8dec0d2c31d4..7a0dadaa4dfe 100644 --- a/website/src/pages/ur/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -6,9 +6,9 @@ As with many systems processing large amounts of data, The Graph's Indexers (Gra ## ٹھیک ہے، یہ ہے کیا؟ -**سب گراف فورکنگ ** _دوسرے_ سب گراف اسٹور (عام طور پر ایک دور دراز) سے سستی سے ہستیوں کو بازیافت کرنے کا عمل ہے. +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -ڈیبگنگ کے تناظر میں، **سب گراف فورکنگ** آپ کو بغیر انتظار کیے بلاک _X_ پر اپنے ناکام سب گراف کو ڈیبگ کرنے کی اجازت دیتا ہے۔ _X_ کو بلاک کرنے کے لیے مطابقت پذیری کے لیے. 
+In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## کیا؟! کیسے؟ @@ -18,9 +18,9 @@ In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph N ## براۓ مہربانی، مجہے کچھ کوڈ دکھائیں! -سب گراف ڈی بگنگ پر توجہ مرکوز رکھنے کے لیے، آئیے چیزوں کو سادہ رکھیں اور [مثالی- سب گراف](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) کے ساتھ چلائیں۔ ایتھیریم گریویٹی سمارٹ کنٹریکٹ کی انڈیکس کرنا. +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -یہاں `Gravatar` کی انڈیکسنگ کے لیے بیان کردہ ہینڈلرز ہیں، بغیر کسی بگ کے: +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -53,9 +53,9 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St 3. اس کے مطابقت پذیر ہونے کا انتظار کریں. 4. اگر یہ دوبارہ ٹوٹ جاتا ہے تو 1 پر واپس جائیں، ورنہ: ہورے! -یہ واقعی ایک عام ڈیبگ کے عمل سے کافی واقف ہے، لیکن ایک قدم ایسا ہے جو اس عمل کو بری طرح سست کر دیتا ہے: _3۔ اس کے مطابقت پذیر ہونے کا انتظار کریں۔_ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -**سب گراف فورکنگ** کا استعمال کرتے ہوئے ہم بنیادی طور پر اس مرحلے کو ختم کر سکتے ہیں۔ یہاں یہ کیسا لگتا ہے: +Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. میپنگ کے ماخذ میں تبدیلی کریں، جس سے آپ کو یقین ہے کہ مسئلہ حل ہو جائے گا. @@ -69,14 +69,14 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St اور میرا جواب: -1. `fork-base` "بیس" یو آر ایل ہے، اس طرح کہ جب _سب گراف آئی ڈی_ کو نتیجہ خیز URL میں شامل کیا جاتا ہے (`/`) سب گراف کے اسٹور کے لیے ایک درست گراف کیو ایل اینڈ پوائنٹ ہے. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. فورکنگ آسان ہے، پریشان ہونے کی ضرورت نہیں ہے: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -اس کے علاوہ، سب گراف مینی فیسٹ میں `dataSources.source.startBlock` فیلڈ کو مشکل بلاک کی تعداد پر سیٹ کرنا نہ بھولیں، تاکہ آپ غیر ضروری بلاکس کو انڈیکس کرنا چھوڑ کر فورک کا فائدہ اٹھا سکیں! +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! تو، یہاں میں کیا کرتا ہوں: @@ -90,7 +90,7 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. محتاط معائنے کے بعد میں نے محسوس کیا کہ میرے دو ہینڈلرز میں `Gravatar` کو انڈیکس کرتے وقت استعمال ہونے والی `id` نمائندگیوں میں کوئی مماثلت نہیں ہے۔ جب کہ `handleNewGravatar` اسے ہیکس میں تبدیل کرتا ہے (`()event.params.id.toHex`)، ` handleUpdatedGravatar` int32 استعمال کرتا ہے (`()event.params.id.toI32`) جس کی وجہ سے `handleUpdatedGravatar` "Gravatar نہیں ملا!" سے گھبراتا ہے۔ میں ان دونوں کو `id` کو ہیکس میں تبدیل کرتا ہوں. +2. 
After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. 3. After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash From 4d67a8c8d46a0a6a84269a4fa8a6cf8a8a0d5c8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:57:59 -0500 Subject: [PATCH 0767/1534] New translations subgraph-debug-forking.mdx (Marathi) --- .../cookbook/subgraph-debug-forking.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/mr/subgraphs/cookbook/subgraph-debug-forking.mdx index 4b1bcc0ede21..3c7f2ec051e3 100644 --- a/website/src/pages/mr/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -6,9 +6,9 @@ As with many systems processing large amounts of data, The Graph's Indexers (Gra ## ठीक आहे, ते काय आहे? -**सबग्राफ फोर्किंग** ही _दुसऱ्या_ सबग्राफच्या स्टोअरमधून (सामान्यत: रिमोट एक) घटक आणण्याची प्रक्रिया आहे. +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -डीबगिंगच्या संदर्भात, **सबग्राफ फोर्किंग** तुम्हाला तुमचा अयशस्वी सबग्राफ ब्लॉक _X_ येथे वाट न पाहता डीबग करण्याची अनुमती देते _X_ ला ब्लॉक करण्यासाठी सिंक-अप करण्यासाठी. +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## काय?! कसे? @@ -18,9 +18,9 @@ In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph N ## कृपया, मला काही कोड दाखवा! -सबग्राफ डीबगिंगवर लक्ष केंद्रित करण्यासाठी, चला गोष्टी सोप्या ठेवूया आणि [उदाहरण-सबग्राफ< सोबत चालवूया ](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) इथरियम ग्रॅविटी स्मार्ट कॉन्ट्रॅक्टचे अनुक्रमणिका. +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -कोणत्याही बगशिवाय `Gravatar`s अनुक्रमित करण्यासाठी परिभाषित हँडलर येथे आहेत: +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -53,9 +53,9 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St 3. ते समक्रमित होण्याची प्रतीक्षा करा. 4. तो पुन्हा खंडित झाल्यास 1 वर परत जा, अन्यथा: हुर्रे! -सामान्य डीबग प्रक्रियेसाठी हे खरोखरच परिचित आहे, परंतु एक पायरी आहे जी प्रक्रिया अत्यंत मंद करते: _3. ते समक्रमित होण्याची प्रतीक्षा करा._ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -**सबग्राफ फोर्किंग** वापरून आम्ही मूलत: ही पायरी काढून टाकू शकतो. ते कसे दिसते ते येथे आहे: +Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. 
मॅपिंग स्त्रोतामध्ये बदल करा, जो तुम्हाला विश्वास आहे की समस्या सोडवेल. @@ -69,14 +69,14 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St आणि मी उत्तर देतो: -1. `फोर्क-बेस` ही "बेस" URL आहे, जसे की जेव्हा _सबग्राफ आयडी_ जोडली जाते तेव्हा परिणामी URL (`/`) हा सबग्राफ स्टोअरसाठी एक वैध GraphQL एंडपॉइंट आहे. +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. काटा काढणे सोपे आहे, घाम गाळण्याची गरज नाही: ```bash -$ आलेख तैनात --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 +$ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -तसेच, सबग्राफ मॅनिफेस्टमधील `dataSources.source.startBlock` फील्ड समस्याग्रस्त ब्लॉकच्या संख्येवर सेट करण्यास विसरू नका, जेणेकरून तुम्ही अनावश्यक ब्लॉक्सची अनुक्रमणिका वगळू शकता आणि फोर्कचा फायदा घेऊ शकता! +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! तर, मी काय करतो ते येथे आहे: From 37ace41806c65b505f232a206ff6188f7a026bbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:00 -0500 Subject: [PATCH 0768/1534] New translations subgraph-debug-forking.mdx (Hindi) --- .../cookbook/subgraph-debug-forking.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/subgraph-debug-forking.mdx b/website/src/pages/hi/subgraphs/cookbook/subgraph-debug-forking.mdx index da72d452509e..0dc044459311 100644 --- a/website/src/pages/hi/subgraphs/cookbook/subgraph-debug-forking.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/subgraph-debug-forking.mdx @@ -6,9 +6,9 @@ As with many systems processing large amounts of data, The Graph's Indexers (Gra ## ठीक है वो क्या है? -**सबग्राफ फोर्किंग** आलसी ढंग से _दूसरे_ सबग्राफ के स्टोर (आमतौर पर एक परोक्ष सबग्राफ) से इकाइयां को लाने की प्रक्रिया है। +**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). -डिबगिंग के संदर्भ में, **सबग्राफ फोर्किंग** आपको ब्लॉक*X* को सिंक-अप करने के लिए बिना प्रतीक्षा किए ब्लॉक _X_ पर अपने विफल सबग्राफ को डीबग करने की अनुमति देता है । +In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. ## क्या?! कैसे? @@ -18,9 +18,9 @@ In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph N ## कृपया मुझे कुछ कोड दिखाओ! -सबग्राफ डिबगिंग पर ध्यान केंद्रित करने के लिए, आइए चीजों को सरल रखें और एथेरियम ग्रेविटी स्मार्ट कॉन्ट्रैक्ट को इंडेक्स करने वाले [उदाहरण-सबग्राफ](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) के साथ चलें। +To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. -`Gravatar`s को अनुक्रमणित करने के लिए परिभाषित हैंडलर यहां दिए गए हैं, बिना किसी बग के: +Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -53,9 +53,9 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St 3. इसके सिंक-अप होने की प्रतीक्षा करें। 4. 
यदि यह फिर से टूट जाता है तो 1 पर वापस जाएँ, अन्यथा: हुर्रे! -यह वास्तव में एक सामान्य डिबग प्रक्रिया से काफी परिचित है, लेकिन एक कदम है जो प्रक्रिया को बहुत धीमा कर देता है: _3. इसके सिंक-अप होने की प्रतीक्षा करें।_ +It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ -**सबग्राफ फोर्किंग** का उपयोग करके हम अनिवार्य रूप से इस चरण को समाप्त कर सकते हैं। यहाँ यह कैसा दिखता है: +Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: 0. Spin-up a local Graph Node with the **_appropriate fork-base_** set. 1. मैपिंग सोर्स में परिवर्तन करें, जिसके बारे में आपको लगता है कि इससे समस्या हल हो जाएगी. @@ -69,14 +69,14 @@ Oops, how unfortunate, when I deploy my perfect looking subgraph to [Subgraph St और मैं उत्तर देता हूं: -1. `fork-base` "आधार" URL है, जैसे कि जब _सबग्राफ आईडी_ जोड़ा जाता है तो परिणामी URL (`/`) सबग्राफ के स्टोर के लिए एक मान्य ग्राफ़क्यूएल एंडपॉइंट है। +1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. 2. फोर्किंग आसान है, पसीना बहाने की जरूरत नहीं: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -साथ ही, सबग्राफ मेनिफ़ेस्ट में `dataSources.source.startBlock` फ़ील्ड को समस्याग्रस्त ब्लॉक की संख्या पर सेट करना न भूलें, ताकि आप अनावश्यक ब्लॉकों को इंडेक्सिंग करना छोड़ सकें और फोर्क का लाभ उठा सकें! +Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! तो, यहाँ मैं क्या करता हूँ: @@ -90,7 +90,7 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -2. सावधानीपूर्वक निरीक्षण के बाद मैंने देखा कि मेरे दो हैंडलर में `Gravatar` को अनुक्रमित करते समय उपयोग किए जाने वाले `id` अभ्यावेदन में एक बेमेल है। जबकि `handleNewGravatar` इसे हेक्स (`event.params.id.toHex()`) में बदल देता है, `handleUpdatedGravatar` एक int32 (`event. params.id.toI32()`) जो "Gravatar नहीं मिला!" से घबराने के लिए `handleUpdatedGravatar` का कारण बनता है। मैं उन दोनों को `आईडी` को हेक्स में बदलने के लिए तैयार करता हूं। +2. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. 3. 
After I made the changes I deploy my subgraph to the local Graph Node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: ```bash From ea25ca2da2bdc468a6c4b863bbd312765fe3c26a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:01 -0500 Subject: [PATCH 0769/1534] New translations subgraph-uncrashable.mdx (French) --- .../cookbook/subgraph-uncrashable.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/fr/subgraphs/cookbook/subgraph-uncrashable.mdx index 319851bc8579..fadcd9b98faf 100644 --- a/website/src/pages/fr/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,28 +2,28 @@ title: Générateur de code de subgraph sécurisé --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) est un outil de génération de code qui génère un ensemble de fonctions d'assistance à partir du schéma graphql d'un projet.Il garantit que toutes les interactions avec les entités de votre subgraph sont totalement sûres et cohérentes. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## Pourquoi intégrer Subgraph Uncrashable ? -- **Disponibilité continue**. Les entités mal gérées peuvent provoquer le crash des subgraphs, ce qui peut perturber les projets qui dépendent de The Graph. Configurez des fonctions d'assistance pour rendre vos subgraphs "incassables" et assurer la continuité des activités. +- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. -- **Complètement sûr**. Les problèmes courants rencontrés dans le développement de subgraphs sont les problèmes de chargement d'entités non définies, ne pas définir ou initialiser toutes les valeurs des entités, et les conditions de concurrence lors du chargement et de la sauvegarde des entités. Assurez-vous que toutes les interactions avec les entités sont complètement atomiques. +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **Configurable par l'utilisateur** Définissez les valeurs par défaut et configurez le niveau de contrôles de sécurité qui convient aux besoins de votre projet individuel. Des journaux d'avertissement sont enregistrés indiquant où il y a une violation de la logique de subgraph pour aider à corriger le problème afin d'assurer l'exactitude des données. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. 
-**Fonctionnalités principales** +**Key Features** -- L'outil de génération de code prend en charge **tous** les types de subgraphs et est configurable pour permettre aux utilisateurs de définir des valeurs par défaut raisonnables. La génération de code utilisera cette configuration pour générer des fonctions d'assistance conformes aux spécifications de l'utilisateur. +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - Le cadre comprend également un moyen (via le fichier de configuration) de créer des fonctions de définition personnalisées, mais sûres, pour des groupes de variables d'entité. De cette façon, il est impossible pour l'utilisateur de charger/utiliser une entité de graph obsolète et il est également impossible d'oublier de sauvegarder ou définissez une variable requise par la fonction. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- Les logs d'avertissement sont enregistrés sous forme de logs indiquant où il y a une violation de la logique du subgraph pour aider à corriger le problème afin d'assurer l'exactitude des données. Subgraph Uncrashable peut être exécuté en tant qu'indicateur facultatif à l'aide de la commande Graph CLI codegen. ```sh -graphe codegen -u [options] [] +graph codegen -u [options] [] ``` -Visitez la [subgraph de documentation incassable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ou regardez ceci [tutoriel vidéo](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) pour en savoir plus et commencer à développer des subgraphs plus sûrs. +Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. From e1b2d257274ce8fa5ddece0cda7cb958a80e97d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:02 -0500 Subject: [PATCH 0770/1534] New translations subgraph-uncrashable.mdx (Spanish) --- .../es/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/es/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/es/subgraphs/cookbook/subgraph-uncrashable.mdx index d7a39a67df81..59b33568a1f2 100644 --- a/website/src/pages/es/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/es/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,19 +2,19 @@ title: Generador de código de subgrafo seguro --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) es una herramienta de generación de código que genera un conjunto de funciones auxiliares a partir del esquema graphql de un proyecto. Garantiza que todas las interacciones con las entidades en su subgrafo sean completamente seguras y consistentes. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## ¿Por qué integrarse con Subgraph Uncrashable? -- **Tiempo de actividad continuo**. 
Las entidades mal manejadas pueden hacer que los subgrafos se bloqueen, lo que puede ser perjudicial para los proyectos que dependen de The Graph. Configura funciones auxiliares para que tus subgrafos sean "uncrashable" y garantices la continuidad del negocio.
+- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity.

-- **Completamente seguro**. Los problemas comunes que se observan en el desarrollo de subgrafos son problemas de carga de entidades indefinidas, no establecer o inicializar todos los valores de las entidades y condiciones de carrera al cargar y guardar entidades. Asegúrate de que todas las interacciones con las entidades sean completamente atómicas.
+- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic.

-- **Configurable por el usuario** Establece valores predeterminados y configura el nivel de controles de seguridad que se adapte a las necesidades de tu proyecto individual. Se registran registros de advertencia que indican dónde hay una infracción de la lógica del subgrafo para ayudar a solucionar el problema y garantizar la precisión de los datos.
+- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy.

-**Características clave**
+**Key Features**

-- La herramienta de generación de código se adapta a **todos** los tipos de subgrafos y se puede configurar para que los usuarios establezcan valores predeterminados sensatos. La generación de código utilizará esta configuración para generar funciones auxiliares que cumplan con las especificaciones de los usuarios.
+- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification.

 - El marco también incluye una forma (a través del archivo de configuración) para crear funciones de establecimiento personalizadas, pero seguras, para grupos de variables de entidad. De esta forma, es imposible que el usuario cargue/utilice una entidad gráfica obsoleta y también es imposible olvidarse de guardar o configurar una variable requerida por la función.

 Subgraph Uncrashable se puede ejecutar como un indicador opcional mediante el comando codegen de Graph CLI.

 ```sh
 graph codegen -u [options] [<subgraph-manifest>]
 ```

-Visita la [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) o mira este [video tutorial](https://float- capital.github.io/float-subgraph-uncrashable/docs/tutorial) para obtener más información y comenzar a desarrollar subgrafos más seguros.
+Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. 
From e7ccc31f3a3db54a4a8d0c8620730552b2610afe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:04 -0500 Subject: [PATCH 0771/1534] New translations subgraph-uncrashable.mdx (Czech) --- .../cs/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/cs/subgraphs/cookbook/subgraph-uncrashable.mdx index 13c979d18853..53750dd1cbee 100644 --- a/website/src/pages/cs/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,19 +2,19 @@ title: Generátor kódu bezpečného podgrafu --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) je nástroj pro generování kódu, který generuje sadu pomocných funkcí ze schématu graphql projektu. Zajišťuje, že všechny interakce s entitami v podgrafu jsou zcela bezpečné a konzistentní. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## Proč se integrovat s aplikací Subgraph Uncrashable? -- **Plynulá doba provozu**. Nesprávně zpracované entity mohou způsobit pád dílčích grafů, což může narušit projekty, které jsou na Grafu závislé. Nastavte pomocné funkce, které zajistí, aby vaše podgrafy "nepadaly" a zajistily nepřetržitost provozu. +- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. -- **Úplně bezpečné**. Mezi běžné problémy, které se při vývoji podgrafů vyskytují, patří problémy s načítáním nedefinovaných entit, nenastavením nebo inicializací všech hodnot entit a závodní podmínky při načítání a ukládání entit. Zajistěte, aby všechny interakce s entitami byly zcela atomické. +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **Uživatelsky konfigurovatelné** Nastavte výchozí hodnoty a nakonfigurujte úroveň bezpečnostních kontrol, která vyhovuje potřebám vašeho projektu. Zaznamenávají se výstražné protokoly, které uvádějí, kde došlo k porušení logiky dílčího grafu, aby bylo možné problém opravit a zajistit přesnost dat. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. -**Klíčové vlastnosti** +**Key Features** -- Nástroj pro generování kódu vyhovuje **všem** typům podgrafů a je konfigurovatelný, aby uživatelé mohli nastavit rozumné výchozí hodnoty. Generování kódu použije tuto konfiguraci k vygenerování pomocných funkcí, které odpovídají zadání uživatele specifikace. +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. 
- Framework také obsahuje způsob (prostřednictvím konfiguračního souboru), jak vytvořit vlastní, ale bezpečné funkce setteru pro skupiny proměnných entit. Tímto způsobem není možné, aby uživatel načetl/použil zastaralou entitu grafu, a také není možné zapomenout uložit nebo nastavit proměnnou, kterou funkce vyžaduje. @@ -26,4 +26,4 @@ Podgraf Uncrashable lze spustit jako volitelný příznak pomocí příkazu Grap graph codegen -u [options] [] ``` -Navštivte [dokumentaci k subgrafů bez možnosti havárie](https://float-capital.github.io/float-subgraph-uncrashable/docs/) nebo se podívejte na tento [video tutoriál](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial), kde se dozvíte více a můžete začít s vývojem bezpečnějších subgrafů. +Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. From 3383adcc1e0bac8aaa328102d9a51d1a14d2a84f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:06 -0500 Subject: [PATCH 0772/1534] New translations subgraph-uncrashable.mdx (Japanese) --- .../ja/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ja/subgraphs/cookbook/subgraph-uncrashable.mdx index 97d6d7fb8fe4..74d66b27fcaa 100644 --- a/website/src/pages/ja/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,19 +2,19 @@ title: 安全なサブグラフのコード生成 --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/)は、プロジェクトのgraphqlスキーマからヘルパー関数のセットを生成するコード生成ツールです。これにより、サブグラフ内のエンティティとのすべてのインタラクションが完全に安全で一貫性のあるものになることを保証します。 +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## Subgraph Uncrashable と統合する理由 -- **継続的なアップタイム**です。誤って処理されたエンティティによってサブグラフがクラッシュすることがあり、The Graphに依存しているプロジェクトに支障をきたすことがあります。ヘルパー関数を設定して、サブグラフを「クラッシュしない」ようにし、ビジネスの継続性を確保しましょう。 +- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. -- **完全な安全**です。サブグラフの開発でよく見られる問題は、未定義のエンティティのロード、エンティティのすべての値の設定または初期化、エンティティのロードと保存のレースコンディションの問題です。エンティティとのすべてのインタラクションが完全にアトミックであることを確認する。 +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **ユーザー設定可能** デフォルト値を設定し、個々のプロジェクトのニーズに合ったセキュリティチェックのレベルを設定することができます。サブグラフのロジックに違反している箇所を示す警告ログが記録され、データの正確性を確保するために問題の修正に役立ちます。 +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. 
-**主な特徴** +**Key Features** -- コード生成ツールは、**すべての**サブグラフ・タイプに対応し、ユーザーが値を正当に設定できるように設定可能です。コード生成は、この設定を用いて、ユーザの仕様に沿ったヘルパー関数を生成します。 +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - また、このフレームワークには、エンティティ変数のグループに対して、カスタムだが安全なセッター関数を作成する方法が(設定ファイルを通じて)含まれています。この方法では、ユーザーが古いグラフ・エンティティをロード/使用することは不可能であり、また、関数が必要とする変数の保存や設定を忘れることも不可能です。 @@ -26,4 +26,4 @@ Subgraph Uncrashableは、Graph CLI codegenコマンドでオプションのフ graph codegen -u [options] [] ``` -[subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/)や[video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial)で、より安全なサブグラフの開発について詳しく知ることができます。 +Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. From 0799ab5d9c6f4678226c478c14cb7c63e694c33a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:08 -0500 Subject: [PATCH 0773/1534] New translations subgraph-uncrashable.mdx (Portuguese) --- .../pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx index 19defed68524..522740ee8246 100644 --- a/website/src/pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -6,11 +6,11 @@ O [Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrasha ## Por que integrar com a Subgraph Uncrashable? -- **Atividade Contínua**. Entidades mal-cuidadas podem causar panes em subgraphs, o que pode ser perturbador para projetos dependentes no The Graph. Prepare funções de helper para deixar os seus subgraphs "incrasháveis" e garantir a continuidade dos negócios. +- **Atividade Contínua**. Entidades mal-cuidadas podem causar panes em subgraphs, o que pode ser perturbador para projetos dependentes no The Graph. Prepare funções de helper para deixar os seus subgraphs "impossíveis de travar" e garantir a continuidade dos negócios. -- **Totalmente Seguro**. Alguns dos problemas comuns vistos no desenvolvimento de subgraphs são problemas de carregamento de entidades não definidas; o não-preparo ou inicialização de todos os valores de entidades; e condições de corrida sobre carregamento e salvamento de entidades. Garanta que todas as interações com entidades sejam completamente atômicas. +- **Totalmente Seguro**. Alguns dos problemas comuns vistos na programação de subgraphs são problemas de carregamento de entidades não definidas; o não-preparo ou inicialização de todos os valores de entidades; e condições de corrida sobre carregamento e salvamento de entidades. Garanta que todas as interações com entidades sejam completamente atômicas. -- **Configurável pelo Usuário**. Determine valores padrões e configure o nível de checagens de segurança necessário para o seu projeto individual. São gravados logs de aviso, que indicam onde há uma brecha de lógica no subgraph para ajudar a solucionar o problema e garantir a precisão dos dados. +- **Configurável pelo Utilizador**. 
Determine valores padrão e configure o nível necessário de verificações de segurança para o seu projeto. São gravados registros de aviso que indicam onde há uma brecha de lógica no subgraph, auxiliando o processo de solução de problemas e garantir a precisão dos dados. **Características Importantes** @@ -26,4 +26,4 @@ A Subgraph Uncrashable pode ser executada como flag opcional usando o comando co graph codegen -u [options] [] ``` -Visite a [documentação da Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ou assista este [tutorial em vídeo](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) para aprender mais e começar a desenvolver subgraphs mais seguros. +Visite a [documentação do Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ou veja este [tutorial em vídeo](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) para aprender como programar subgraphs mais seguros. From 3fe87aaff71eac0d771006562d716f6058b711f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:09 -0500 Subject: [PATCH 0774/1534] New translations subgraph-uncrashable.mdx (Russian) --- .../cookbook/subgraph-uncrashable.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ru/subgraphs/cookbook/subgraph-uncrashable.mdx index b6697e2ab778..f81fe52608e8 100644 --- a/website/src/pages/ru/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -1,24 +1,24 @@ --- -title: Генератор кода безопасного подграфа +title: Генератор кода безопасного субграфа --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) - это инструмент генерации кода, который генерирует набор вспомогательных функций из схемы graphql проекта. Это гарантирует, что все взаимодействия с объектами в вашем подграфе полностью безопасны и согласованны. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) — это инструмент для генерации кода, который создает набор вспомогательных функций из схемы GraphQL проекта. Он гарантирует, что все взаимодействия с объектами в Вашем субграфе будут полностью безопасными и последовательными. -## Why integrate with Subgraph Uncrashable? +## Зачем интегрироваться с Subgraph Uncrashable? -- **Непрерывное время безотказной работы**. Неправильное обращение с объектами может привести к сбою подграфов, что может привести к сбоям в проектах, зависящих от The Graph. Настройте вспомогательные функции, чтобы сделать ваши подграфы “безаварийными” и обеспечить непрерывность функционирования. +- **Непрерывная работа**. Ошибки в обработке объектов могут привести к сбоям субграфов, что нарушит работу проектов, зависящих от The Graph. Настройте вспомогательные функции, чтобы Ваши субграфы оставались «непотопляемыми» и обеспечивали стабильную работу. -- **Полностью безопасен**. При разработке подграфов часто встречаются такие проблемы, как загрузка неопределенных объектов, установка или инициализация не всех значений объектов, а также условия "гонки" при загрузке и сохранении объектов. Убедитесь, что все взаимодействия с объектами полностью последовательны. +- **Полная безопасность**. 
Обычные проблемы при разработке субграфов — это ошибки загрузки неопределенных элементов, неинициализированные или неустановленные значения элементов, а также гонки при загрузке и сохранении элементов. Убедитесь, что все взаимодействия с объектами полностью атомарны.

-- **Настраиваемый пользователем** Установите значения по умолчанию и настройте уровень проверок безопасности, соответствующий потребностям вашего индивидуального проекта. Записываются логи предупреждений, указывающие на нарушение логики подграфа, что позволяет устранить проблему и обеспечить точность данных.
+- **Настройка пользователем**. Установите значения по умолчанию и настройте уровень проверок безопасности, который соответствует потребностям Вашего индивидуального проекта. Записываются предупреждающие логи, указывающие на то, где происходит нарушение логики субграфа, что помогает исправить проблему и обеспечить точность данных.

 **Ключевые особенности**

-- Инструмент генерации кода поддерживает **все** типы подграфов и конфигурируется для пользователей, чтобы установить разумные значения по умолчанию. Генерация кода будет использовать эту конфигурацию для создания вспомогательных функций, соответствующих спецификации пользователей.
+- Инструмент генерации кода поддерживает **все** типы субграфов и настраивается таким образом, чтобы пользователи могли задать разумные значения по умолчанию. Генерация кода будет использовать эту настройку для создания вспомогательных функций в соответствии с требованиями пользователей.

-- Фреймворк также включает способ (через config-файл) создания пользовательских, но безопасных функций установки для групп переменных объектов. Таким образом, пользователь не сможет загрузить/использовать устаревшую graph entity, и также невозможно забыть о сохранении или установке переменной, которая требуется функцией.
+- Фреймворк также включает в себя способ создания пользовательских, но безопасных функций установки для групп переменных объектов (через config-файл). Таким образом, пользователь не сможет загрузить/использовать устаревшую graph entity, и также не сможет забыть о сохранении или установке переменной, которая требуется функцией.

-- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy.
+- Предупреждающие логи записываются как логи, указывающие на нарушение логики субграфа, чтобы помочь устранить проблему и обеспечить точность данных.

 Subgraph Uncrashable можно запустить как необязательный флаг с помощью команды Graph CLI codegen.

@@ -26,4 +26,4 @@ Subgraph Uncrashable можно запустить как необязатель
 graph codegen -u [options] []
 ```

-Обратитесь к документации [subgraph uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) или посмотрите [видеоурок](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial), чтобы узнать больше и приступить к разработке более безопасных подграфов.
+Ознакомьтесь с [документацией по subgraph uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) или посмотрите это [видеоруководство](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial), чтобы узнать больше и начать разрабатывать более безопасные субграфы.
From e18d9c9aacd47c669ebd24216cd42c5373a13f21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:10 -0500 Subject: [PATCH 0775/1534] New translations subgraph-uncrashable.mdx (Swedish) --- .../sv/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/sv/subgraphs/cookbook/subgraph-uncrashable.mdx index c77c02c2bee6..ce8e87ecfd46 100644 --- a/website/src/pages/sv/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,19 +2,19 @@ title: Säker subgraf kodgenerator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) är ett kodgenereringsverktyg som genererar en uppsättning hjälpfunktioner från ett projekts graphql schema. Det säkerställer att alla interaktioner med enheter i din subgraf är helt säkra och konsekventa. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## Varför integrera med Subgraf Uncrashable? -- **Kontinuerlig drifttid**. Misshandlade enheter kan få subgrafer att krascha, vilket kan vara störande för projekt som är beroende av The Graph. Ställ in hjälpfunktioner för att göra dina subgrafer "ofullständig" och säkerställa kontinuitet i verksamheten. +- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. -- **Helt säkert**. Vanliga problem som ses vid subgrafutveckling är problem med att ladda odefinierade enheter, att inte ställa in eller initiera alla värden på entiteter och tävlingsförhållanden för att ladda och spara enheter. Se till att alla interaktioner med enheter är helt atomära. +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **Användarkonfigurerbar** Ställ in standardvärden och konfigurera nivån för säkerhetskontroller som passar ditt individuella projekts behov. Varningsloggar registreras och indikerar om det finns ett brott mot subgraflogik för att hjälpa till att korrigera problemet för att säkerställa datanoggrannhet. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. -**Viktiga egenskaper** +**Key Features** -- Kodgenereringsverktyget rymmer **alla** subgraf typer och är konfigurerbart för användare att ställa in sunda standardvärden för värden. Kodgenereringen kommer att använda denna konfiguration för att generera hjälpfunktioner som är enligt användarens specifikation. +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. 
- Ramverket innehåller också ett sätt (via konfigurationsfilen) att skapa anpassade, men säkra, sätterfunktioner för grupper av entitetsvariabler. På så sätt är det omöjligt för användaren att ladda/använda en inaktuell grafenhet och det är också omöjligt att glömma att spara eller ställa in en variabel som krävs av funktionen.

@@ -26,4 +26,4 @@ Subgraph Uncrashable kan köras som en valfri flagga med kommandot Graph CLI cod
 graph codegen -u [options] []
 ```

-Besök [dokumentationen som inte går att krascha under subgraph](https://float-capital.github.io/float-subgraph-uncrashable/docs/) eller titta på denna [självstudievideo](https://float- capital.github.io/float-subgraph-uncrashable/docs/tutorial) för att lära dig mer och komma igång med att utveckla säkrare subgrafer.
+Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs.

From f87a34ac9338fe39937ffe5bb8923e72a5670802 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:58:11 -0500
Subject: [PATCH 0776/1534] New translations subgraph-uncrashable.mdx (Turkish)

---
 .../tr/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/website/src/pages/tr/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/tr/subgraphs/cookbook/subgraph-uncrashable.mdx
index fedae7827357..bd935dd33689 100644
--- a/website/src/pages/tr/subgraphs/cookbook/subgraph-uncrashable.mdx
+++ b/website/src/pages/tr/subgraphs/cookbook/subgraph-uncrashable.mdx
@@ -2,23 +2,23 @@
 title: Güvenli Subgraph Kod Oluşturucu
 ---

-[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/), bir proje graphql şemasından yardımcı fonksiyonlar kümesi oluşturan bir kod oluşturma aracıdır. Subgraphlarınızdaki unsurlarla olan tüm etkileşimlerin tamamen güvenli ve tutarlı olmasını temin eder.
+[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) (çökmeyen subgraph), bir projenin graphql şemasından bir dizi yardımcı fonksiyon üreten bir kod oluşturma aracıdır. `subgraph`inizdeki varlıklarla tüm etkileşimlerin tamamen güvenli ve tutarlı olmasını sağlar.

 ## Neden Subgraph Uncrashable'ı entegre etmelisiniz?

-- **Devamlı Çalışma Süresi**. Yanlış işlenen unsurlar subgraphlar'ın çökmesine neden olabilir ve bu da Graph'a bağlı projeler için işleri aksatabilir. Subgraphlar'ınızı "çökmez" hale getirmek ve işinizin devamlılığını sağlamak için yardımcı fonksiyonları kurun.
+- **Sürekli Çalışırlık**. Yanlış yönetilen varlıklar subgraph'lerin çökmesine neden olabilir. Bu da The Graph'e bağımlı olan projeler için işleri aksatabilir. Subgraph'lerinizi "çökmez" hale getirmek ve iş sürekliliğini sağlamak için yardımcı fonksiyonları ayarlayın.

-- **Tamamen Güvenli**. Subgraph geliştirme aşamasında sık görülen sorunlar, tanımsız unsurların yüklenmesi, tüm unsur değerlerinin ayarlanmaması veya başlatılmaması ve unsurları yüklerken ve kaydederken yarış koşullarıdır. Unsurlarla olan etkileşimlerin tamamının tamamen atomik(atomic) olduğundan emin olun.
+- **Tamamen Güvenli**. `subgraph` geliştirme sırasında yaygın görülen sorunlar, tanımsız varlıkların yüklenmesi, varlıkların tüm değerlerinin ayarlanmaması veya ilklendirilmemesi, varlıkların yüklenmesi ve kaydedilmesi sırasındaki yarış koşullarıdır.
Varlıklarla tüm etkileşimlerin tamamen atomik olduğundan emin olun.

-- **Kullanıcı Ayarlı** Varsayılan değerleri ayarlayın ve proje ihtiyaçlarınıza uygun olarak güvenlik kontrolleri düzeyini yapılandırın. Veri doğruluğunu sağlamak için Subgraph mantığında ihlal gerçekleştiğinde uyarı kayıtları kaydedilir ve sorunu düzeltmek için kullanılabilir.
+- **Kullanıcı Tarafından Yapılandırılabilir** Varsayılan değerleri ayarlayın ve projenizin ihtiyaçlarına uygun güvenlik kontrol seviyesini yapılandırın. `Subgraph` mantığındaki bir ihlali gösteren uyarı günlükleri kaydedilir ve verilerin doğruluğunu sağlamak için sorunun giderilmesine yardımcı olunur.

 **Ana Özellikler**

-- Kod oluşturma aracı **tüm** subgraph türlerine uygun ve kullanıcıların makul varsayılan değerler ayarlamasına izin verir. Kod oluşturma, bu yapılandırmayı kullanarak kullanıcıların belirlediği özelliklere göre yardımcı fonksiyonlar oluşturacaktır.
+- Kod oluşturma aracı, **tüm** subgraph türlerini destekler ve kullanıcıların değerlerde makul varsayılanlar ayarlamaları için yapılandırılabilir. Kod oluşturma, kullanıcıların belirtimine uygun yardımcı fonksiyonlar oluşturmak için bu yapılandırmayı kullanacaktır.

 - Framework ayrıca unsur değişkenleri grupları için özel, ancak güvenli ayarlayıcı fonksiyonları oluşturmanın bir yolunu (yapılandırma dosyası aracılığıyla) içerir. Bu sayede, kullanıcının eski bir graph unsurunu yüklemesi/kullanması ve ayrıca fonksiyonun gerektirdiği bir değişkeni kaydetmeyi veya ayarlamayı unutması imkansız hale gelir.

-- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy.
+- Uyarı günlükleri, subgraph mantığında bir ihlalin nerede olduğunu gösteren günlükler olarak kaydedilir ve veri doğruluğunu sağlamak için sorunun düzeltilmesine yardımcı olur.

 Subgraph Uncrashable, Graph CLI codegen komutu kullanılarak isteğe bağlı bir bayrak olarak çalıştırılabilir.

@@ -26,4 +26,4 @@ Subgraph Uncrashable, Graph CLI codegen komutu kullanılarak isteğe bağlı bir
 graph codegen -u [options] []
 ```

-Daha fazla bilgi edinmek ve daha güvenli subgraphlar geliştirmeye başlamak için [subgraph uncrashable dökümanını](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ziyaret edebilir veya bu [öğretici videoyu](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) izleyebilirsiniz.
+Daha fazla bilgi edinmek ve daha güvenli subgraph'ler geliştirmeye başlamak için [Subgraph Uncrashable dokümantasyonuna](https://float-capital.github.io/float-subgraph-uncrashable/docs/) göz atın veya bu [video rehberini](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) izleyin.
From 02141ea301f4dba762a80bbea8304bf291deecb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:12 -0500 Subject: [PATCH 0777/1534] New translations subgraph-uncrashable.mdx (Chinese Simplified) --- .../zh/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/zh/subgraphs/cookbook/subgraph-uncrashable.mdx index 5726f6409b80..959ec7b532cc 100644 --- a/website/src/pages/zh/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,19 +2,19 @@ title: 安全子图代码生成器 --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) 是一个代码生成工具,从项目的 Graphql 模式生成一组辅助函数。确保与子图中实体的所有交互都是完全安全和一致的。 +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## 为什么要整合子图使其不崩溃? -- **连续正常运行时间**。处理不当的实体可能会导致子图崩溃,这可能会破坏依赖于Graph 的项目。设置 helper 函数,使您的子图“不可崩溃”,并确保业务连续性。 +- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. -- **绝对安全**。在子图开发中常见的问题是加载未定义的实体,不设置或初始化实体的所有值,以及加载和保存实体的竞态条件。确保与实体的所有交互都是完全原子的。 +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **User Configure**设置默认值,并配置适合各个项目需要的安全检查级别。警告日志被记录下来,表明哪里存在子图逻辑的缺陷,以帮助修补这个问题,从而确保数据的准确性。 +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. -**主要特征** +**Key Features** -- 代码生成工具可以容纳**所有**子图类型,并且可以为用户配置,以便在值上设置合理默认值。代码生成将使用此配置生成用户规范所要求的辅助函数。 +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - 该框架还包括一种方法(通过配置文件) 为实体变量组创建自定义但安全的 setter 函数。这样,用户就不可能加载/使用过时的图形实体,也不可能忘记保存或设置函数所需的变量。 @@ -26,4 +26,4 @@ title: 安全子图代码生成器 graph codegen -u [options] [] ``` -访问[子图不崩溃的文档](https://float-capital.github.io/float-subgraph-uncrashable/docs/)或观看这个[视频教程](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial),以了解更多,并开始开发更安全的子图。 +Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. 
From c23cb9bdf541e0bd9dbdd2e692c1bed705a6c477 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:13 -0500 Subject: [PATCH 0778/1534] New translations subgraph-uncrashable.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/ur/subgraphs/cookbook/subgraph-uncrashable.mdx index 3bea4e892699..1ce722e5639d 100644 --- a/website/src/pages/ur/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,19 +2,19 @@ title: محفوظ سب گراف کوڈ جنریٹر --- -[سب گراف ان کریش ایبل](https://float-capital.github.io/float-subgraph-uncrashable/) ایک کوڈ جنریشن ٹول ہے جو کسی پروجیکٹ کے GraphQL اسکیما سے مددگار فنکشنز کا ایک سیٹ تیار کرتا ہے۔. یہ یقینی بناتا ہے کہ آپ کے سب گراف میں موجود ہستیوں کے ساتھ تمام تعاملات مکمل طور پر محفوظ اور مستقل ہوں. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## سب گراف ان کریش ایبل کے ساتھ کیوں ضم کیا جائے؟ -- **مسلسل اپ ٹائم**۔ غلط ہستیوں کی وجہ سے سب گراف کریش ہو سکتے ہیں، جو گراف پر منحصر منصوبوں کے لیے خلل ڈال سکتے ہیں۔ اپنے سب گراف کو "ناقابل شکست" بنانے اور کاروبار کے تسلسل کو یقینی بنانے کے لیے مددگار فنکشنز ترتیب دیں. +- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. -- **مکمل طور پر محفوظ**۔ سب گراف ڈیولپمنٹ میں جو عام مسائل نظر آتے ہیں وہ غیر متعینہ ہستیوں کو لوڈ کرنے، اداروں کی تمام قدروں کو ترتیب دینے یا شروع نہ کرنے، اور اداروں کو لوڈ کرنے اور بچانے پر ریس کے حالات ہیں۔ یقینی بنائیں کہ ہستیوں کے ساتھ تمام تعاملات مکمل طور پر جوہری ہیں. +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **صارف قابل ترتیب** پہلے سے طے شدہ اقدار کو سیٹ کریں اور حفاظتی جانچ کی سطح کو ترتیب دیں جو آپ کے انفرادی پروجیکٹ کی ضروریات کے مطابق ہو۔ انتباہی لاگز ریکارڈ کیے جاتے ہیں جو اس بات کی نشاندہی کرتے ہیں کہ ڈیٹا کی درستگی کو یقینی بنانے کے لیے مسئلے کو پیچ کرنے میں مدد کرنے کے لیے سب گراف کی منطق کی کہاں خلاف ورزی ہوئی ہے. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. -**اہم خصوصیات** +**Key Features** -- کوڈ جنریشن ٹول **تمام** سب گراف کی اقسام کو ایڈجسٹ کرتا ہے اور صارفین کے لیے قدروں پر سمجھدار ڈیفالٹس سیٹ کرنے کے لیے قابل ترتیب ہے۔ کوڈ جنریشن اس کنفیگریشن کو مددگار فنکشنز جنریٹ کرنے کے لیے استعمال کرے گی جو صارفین کی تفصیلات کے لیے ہیں. +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. 
- فریم ورک میں ہستی متغیرات کے گروپس کے لیے حسب ضرورت، لیکن محفوظ، سیٹر فنکشنز بنانے کا ایک طریقہ (کنفگ فائل کے ذریعے) بھی شامل ہے۔ اس طرح صارف کے لیے کسی باسی گراف ہستی کو لوڈ/استعمال کرنا ناممکن ہے اور فنکشن کے لیے مطلوبہ متغیر کو محفوظ کرنا یا سیٹ کرنا بھولنا بھی ناممکن ہے. @@ -26,4 +26,4 @@ title: محفوظ سب گراف کوڈ جنریٹر graph codegen -u [options] [] ``` -[سب گراف ان کریش ایبل دستاویزات](https://float-capital.github.io/float-subgraph-uncrashable/docs/) دیکھیں یا اسے دیکھیں [ویڈیو ٹیوٹوریل](https://float- capital.github.io/float-subgraph-uncrashable/docs/tutorial) مزید جاننے اور محفوظ سب گراف تیار کرنے کے ساتھ شروع کرنے کے لیے. +Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. From fb918c234418b3a8d8088c10f945b377ee2377f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:15 -0500 Subject: [PATCH 0779/1534] New translations subgraph-uncrashable.mdx (Marathi) --- .../subgraphs/cookbook/subgraph-uncrashable.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/mr/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/mr/subgraphs/cookbook/subgraph-uncrashable.mdx index 1b8e106d4b03..9a7e3d9f008e 100644 --- a/website/src/pages/mr/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,19 +2,19 @@ title: सुरक्षित सबग्राफ कोड जनरेटर --- -[सबग्राफ अनक्रॅश करण्यायोग्य](https://float-capital.github.io/float-subgraph-uncrashable/) हे कोड जनरेशन टूल आहे जे प्रोजेक्टच्या graphql स्कीमामधून हेल्पर फंक्शन्सचा संच तयार करते. हे सुनिश्चित करते की आपल्या सबग्राफमधील घटकांसह सर्व परस्परसंवाद पूर्णपणे सुरक्षित आणि सुसंगत आहेत. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## Subgraph Uncrashable सह समाकलित का? -- **सतत अपटाइम**. चुकीच्या पद्धतीने हाताळलेल्या घटकांमुळे सबग्राफ क्रॅश होऊ शकतात, जे ग्राफवर अवलंबून असलेल्या प्रकल्पांसाठी व्यत्यय आणू शकतात. तुमचे सबग्राफ "अनक्रॅश करण्यायोग्य" बनवण्यासाठी आणि व्यवसाय सातत्य सुनिश्चित करण्यासाठी हेल्पर फंक्शन्स सेट करा. +- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. -- **पूर्णपणे सुरक्षित**. सबग्राफ डेव्हलपमेंटमध्ये दिसणार्‍या सामान्य समस्या म्हणजे अपरिभाषित घटक लोड करणे, घटकांची सर्व मूल्ये सेट करणे किंवा प्रारंभ न करणे आणि घटक लोड करणे आणि जतन करणे यावरील शर्यतीची परिस्थिती. घटकांसह सर्व परस्परसंवाद पूर्णपणे अणू आहेत याची खात्री करा. +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **वापरकर्ता कॉन्फिगर करण्यायोग्य** डीफॉल्ट मूल्ये सेट करा आणि तुमच्या वैयक्तिक प्रकल्पाच्या गरजेनुसार सुरक्षा तपासणीची पातळी कॉन्फिगर करा. 
डेटा अचूकता सुनिश्चित करण्यासाठी समस्या पॅच करण्यात मदत करण्यासाठी सबग्राफ लॉजिकचे उल्लंघन कोठे आहे हे दर्शविणारे चेतावणी लॉग रेकॉर्ड केले जातात. +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. -**महत्वाची वैशिष्टे** +**Key Features** -- कोड जनरेशन टूल **सर्व** सबग्राफ प्रकारांना सामावून घेते आणि वापरकर्त्यांसाठी मूल्यांवर योग्य डीफॉल्ट सेट करण्यासाठी कॉन्फिगर करण्यायोग्य आहे. कोड जनरेशन या कॉन्फिगरेशनचा वापर वापरकर्त्यांच्या स्पेसिफिकेशनसाठी असणारी हेल्पर फंक्शन्स व्युत्पन्न करण्यासाठी करेल. +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - फ्रेमवर्कमध्ये एंटिटी व्हेरिएबल्सच्या गटांसाठी सानुकूल, परंतु सुरक्षित, सेटर फंक्शन्स तयार करण्याचा मार्ग (कॉन्फिग फाइलद्वारे) देखील समाविष्ट आहे. अशा प्रकारे वापरकर्त्याला जुना आलेख घटक लोड करणे/वापरणे अशक्य आहे आणि फंक्शनसाठी आवश्यक असलेले व्हेरिएबल सेव्ह करणे किंवा सेट करणे विसरणे देखील अशक्य आहे. @@ -23,7 +23,7 @@ title: सुरक्षित सबग्राफ कोड जनरेट ग्राफ CLI codegen कमांड वापरून Subgraph Uncrashable हा पर्यायी ध्वज म्हणून चालवला जाऊ शकतो. ```sh -आलेख कोडजेन -यू [options] [] +graph codegen -u [options] [] ``` -[सबग्राफ अनक्रॅश करण्यायोग्य दस्तऐवज](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ला भेट द्या किंवा हे पहा [व्हिडिओ ट्यूटोरियल](https://float- capital.github.io/float-subgraph-uncrashable/docs/tutorial) अधिक जाणून घेण्यासाठी आणि सुरक्षित सबग्राफ विकसित करण्यास प्रारंभ करण्यासाठी. +Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. From 822fe945bc82fe0a59a94ab01de522d5692b2d42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:16 -0500 Subject: [PATCH 0780/1534] New translations subgraph-uncrashable.mdx (Hindi) --- .../hi/subgraphs/cookbook/subgraph-uncrashable.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/hi/subgraphs/cookbook/subgraph-uncrashable.mdx b/website/src/pages/hi/subgraphs/cookbook/subgraph-uncrashable.mdx index 5c28f6161087..ace90495aef8 100644 --- a/website/src/pages/hi/subgraphs/cookbook/subgraph-uncrashable.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/subgraph-uncrashable.mdx @@ -2,19 +2,19 @@ title: सुरक्षित सबग्राफ कोड जेनरेटर --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) एक कोड जनरेशन टूल है जो किसी प्रोजेक्ट के ग्राफ़क्यूएल स्कीमा से सहायक फंक्शन्स का एक सेट उत्पन्न करता है. यह सुनिश्चित करता है कि आपके सबग्राफ में इकाइयों के साथ सभी इंटरैक्शन पूरी तरह से सुरक्षित और एक जैसा हैं। +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. ## सबग्राफ अनक्रैशेबल के साथ एकीकृत क्यों करें? 
-- ** निरंतर अपटाइम**। गलत तरीके से हैंडल की गई इकाइयां सबग्राफ को क्रैश कर सकती हैं, जो कि ग्राफ़ पर निर्भर परियोजनाओं के लिए हानिकारक हो सकता है। अपने सबग्राफ को "अनक्रैशेबल" बनाने और व्यवसाय की निरंतरता सुनिश्चित करने के लिए सहायक फंक्शन्स सेट करें। +- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. -- **पूरी तरह से सुरक्षित**। सबग्राफ विकास में देखी जाने वाली साधारण समस्याएं हैं अपरिभाषित इकाइयों का लोड होना, इकाइयों के सभी मूल्यों को स्थापित या प्रारंभ नहीं करना, और इकाइयों को लोड करने और सेव करने पर दौड़ की स्थिति का होना। सुनिश्चित करें कि इकाइयों के साथ सभी इंटरैक्शन पूरी तरह से एटोमिक हो। +- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. -- **उपयोगकर्ता विन्यास योग्य** डिफ़ॉल्ट मान सेट करें और सुरक्षा जांच के स्तर को कॉन्फ़िगर करें जो आपकी व्यक्तिगत परियोजना की आवश्यकताओं के अनुरूप हो। डेटा सटीकता सुनिश्चित करने के लिए समस्या को ठीक करने में मदद करने के लिए चेतावनी लॉग रिकॉर्ड किए जाते हैं जहां सबग्राफ तर्क का उल्लंघन होता है। +- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. -**प्रमुख विशेषताऐं** +**Key Features** -- कोड जनरेशन टूल **सभी** सबग्राफ प्रकारों को समायोजित करता है और उपयोगकर्ताओं के लिए मूल्यों पर समझदार डिफ़ॉल्ट सेट करने के लिए कॉन्फ़िगर करने योग्य है। कोड जनरेशन इस कॉन्फिग का उपयोग सहायक फंक्शन्स को उत्पन्न करने के लिए करेगा जो उपयोगकर्ता विनिर्देश के लिए हैं। +- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - फ्रेमवर्क में इकाई वैरिएबल के समूहों के लिए कस्टम, लेकिन सुरक्षित, सेटर फ़ंक्शन बनाने का एक तरीका (कॉन्फिग फ़ाइल के माध्यम से) भी शामिल है। इस तरह उपयोगकर्ता के लिए एक पुरानी ग्राफ़ इकाई को लोड/उपयोग करना असंभव है और फ़ंक्शन द्वारा आवश्यक वैरिएबल को सहेजना या सेट करना भूलना भी असंभव है। @@ -26,4 +26,4 @@ title: सुरक्षित सबग्राफ कोड जेनरे graph codegen -u [options] [] ``` -अधिक जानने के लिए और सुरक्षित सबग्राफ विकसित करने के साथ आरंभ करने के लिए [सबग्राफ अनक्रैशेबल डॉक्यूमेंटेशन](https://float-capital.github.io/float-subgraph-uncrashable/docs/) पर जाएं या यह [वीडियो ट्यूटोरियल](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) देखें। +Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. 
From d1203d8bcd7bdf1a6fb4e694a0efdcac8d4a5419 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:17 -0500 Subject: [PATCH 0781/1534] New translations timeseries.mdx (Romanian) --- website/src/pages/ro/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ro/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ro/subgraphs/cookbook/timeseries.mdx index 0168be53d7ed..fdee00c65f2b 100644 --- a/website/src/pages/ro/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From a6328bab3973fcb7c24a5687a1eaa0a2b1c6a8a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:18 -0500 Subject: [PATCH 0782/1534] New translations timeseries.mdx (French) --- website/src/pages/fr/subgraphs/cookbook/timeseries.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/timeseries.mdx b/website/src/pages/fr/subgraphs/cookbook/timeseries.mdx index 82176f96bdfd..650083545de4 100644 --- a/website/src/pages/fr/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR @@ -27,7 +28,7 @@ Timeseries and aggregations reduce data processing overhead and accelerate queri - Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. - Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. -### Important Considerations +### Points Importants - Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. - Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. 
From c41ce5620c33704120b1e4ddc0c33819b40a7fbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:19 -0500 Subject: [PATCH 0783/1534] New translations timeseries.mdx (Spanish) --- website/src/pages/es/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/es/subgraphs/cookbook/timeseries.mdx b/website/src/pages/es/subgraphs/cookbook/timeseries.mdx index 1dd08ab764d1..f4437aad4572 100644 --- a/website/src/pages/es/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/es/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From 27c41ab806de70dbfc4c261f01a9a2fc0089245f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:20 -0500 Subject: [PATCH 0784/1534] New translations timeseries.mdx (Arabic) --- website/src/pages/ar/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ar/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ar/subgraphs/cookbook/timeseries.mdx index 03ac4f323fa4..ccc650938778 100644 --- a/website/src/pages/ar/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From 87ca07cdd2917fbdf8b530682cd1b7f3a25e7263 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:21 -0500 Subject: [PATCH 0785/1534] New translations timeseries.mdx (Czech) --- website/src/pages/cs/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/cs/subgraphs/cookbook/timeseries.mdx b/website/src/pages/cs/subgraphs/cookbook/timeseries.mdx index 7178891769da..b6412dd02e46 100644 --- a/website/src/pages/cs/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From 27043834f190c6a98dd325554045a2ba31049392 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:22 -0500 Subject: [PATCH 0786/1534] New translations timeseries.mdx (German) --- website/src/pages/de/subgraphs/cookbook/timeseries.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/de/subgraphs/cookbook/timeseries.mdx b/website/src/pages/de/subgraphs/cookbook/timeseries.mdx index 0168be53d7ed..303646970e05 100644 --- a/website/src/pages/de/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/de/subgraphs/cookbook/timeseries.mdx @@ -1,12 +1,13 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. -## Overview +## Überblick Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. 
This approach is particularly effective when handling large volumes of time-based data. From 84f96ed898e9bfdd358edf677ac950d3f1574c88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:23 -0500 Subject: [PATCH 0787/1534] New translations timeseries.mdx (Italian) --- website/src/pages/it/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/it/subgraphs/cookbook/timeseries.mdx b/website/src/pages/it/subgraphs/cookbook/timeseries.mdx index eeb246a2b3d0..3a2fe2ea5a07 100644 --- a/website/src/pages/it/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/it/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From 17b02688d15018c9c8f0002dfdb0974d51742063 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:24 -0500 Subject: [PATCH 0788/1534] New translations timeseries.mdx (Japanese) --- website/src/pages/ja/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ja/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ja/subgraphs/cookbook/timeseries.mdx index 6ebf5e48a235..6fe7080340dd 100644 --- a/website/src/pages/ja/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From b9492f36da39fc73f662957836252d30a9930e2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:25 -0500 Subject: [PATCH 0789/1534] New translations timeseries.mdx (Korean) --- website/src/pages/ko/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ko/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ko/subgraphs/cookbook/timeseries.mdx index 0168be53d7ed..fdee00c65f2b 100644 --- a/website/src/pages/ko/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From c0f825d7f976b9dc6c5fb07f6159716f48a48306 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:26 -0500 Subject: [PATCH 0790/1534] New translations timeseries.mdx (Dutch) --- website/src/pages/nl/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/nl/subgraphs/cookbook/timeseries.mdx b/website/src/pages/nl/subgraphs/cookbook/timeseries.mdx index 0168be53d7ed..fdee00c65f2b 100644 --- a/website/src/pages/nl/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From c6932e2730e7c2a70fea32350c6ed32d3698088a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:27 -0500 Subject: [PATCH 0791/1534] New translations timeseries.mdx (Polish) --- website/src/pages/pl/subgraphs/cookbook/timeseries.mdx | 1 + 1 file 
changed, 1 insertion(+) diff --git a/website/src/pages/pl/subgraphs/cookbook/timeseries.mdx b/website/src/pages/pl/subgraphs/cookbook/timeseries.mdx index 0168be53d7ed..fdee00c65f2b 100644 --- a/website/src/pages/pl/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From 8440b8e27ca7e0f6ca5a5342f1ea11ecc6426480 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:28 -0500 Subject: [PATCH 0792/1534] New translations timeseries.mdx (Portuguese) --- .../pt/subgraphs/cookbook/timeseries.mdx | 149 +++++++++--------- 1 file changed, 75 insertions(+), 74 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/timeseries.mdx b/website/src/pages/pt/subgraphs/cookbook/timeseries.mdx index 9c2a8632c5b6..f1e123bdaae8 100644 --- a/website/src/pages/pt/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/timeseries.mdx @@ -1,48 +1,49 @@ --- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +title: "Melhores Práticas para um Subgraph #5 — Simplifique e Otimize com Séries Temporais e Agregações" +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. +Tirar vantagem de séries temporais e agregações em subgraphs pode melhorar bastante a velocidade da indexação e o desempenho dos queries. ## Visão geral -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. +Séries temporais e agregações reduzem a sobrecarga do processamento de dados e aceleram queries, ao descarregar computações de agregação para o banco de dados e simplificar o código de mapeamento. Essa abordagem é particularmente eficaz ao lidar com grandes volumes de dados baseados em tempo. -## Benefits of Timeseries and Aggregations +## Vantagens de Séries Temporais e Agregações -1. Improved Indexing Time +1. **Indexação Mais Rápida** -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. +- Menos Dados para Carregar: Os mapeamentos manuseiam menos dados, pois os pontos de dados brutos são armazenados como entidades imutáveis de séries temporais. +- Agregações Geridas pelo Banco de Dados: As agregações são computadas automaticamente pelo banco de dados, reduzindo a carga de trabalho nos mapeamentos. -2. Simplified Mapping Code +2. **Código de Mapeamentos Simplificado** -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. +- Sem Cálculos Manuais: os programadores não precisam mais escrever lógica complexa de agregação em mapeamentos. +- Complexidade Reduzida: Simplifica a manutenção de código e minimiza o potencial de erros. -3. Dramatically Faster Queries +3. 
**Queries Muito Mais Rápidos** -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. +- Dados Imutáveis: Todos os dados de séries temporais são imutáveis, deixando o armazenamento e resgate de dados muito mais eficiente. +- Separação de Dados Eficiente: Os dados agregados são armazenados separadamente dos brutos de séries temporais, permitindo que os queries processem muito menos dados — geralmente várias ordens de magnitude a menos. ### Considerações Importantes -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. +- Dados Imutáveis: os dados de séries temporais não podem ser alterados após gravados, garantindo a integridade e simplificando a indexação. +- Gestão Automática de ID e Registos de Tempo: os campos de ID, e de registo de data e hora, são administrados automaticamente pelo graph-node, reduzindo possíveis erros. +- **Armazenamento Eficiente:** ao separar dados brutos dos agregados, o armazenamento é otimizado e queries são executados mais rapidamente. -## How to Implement Timeseries and Aggregations +## Como Implementar Séries Temporais e Agregações -### Defining Timeseries Entities +### Como Definir Entidades de Séries Temporais -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: +Uma entidade de série temporal representa pontos de dados brutos coletados gradativamente. Ela é definida com a anotação `@entity(timeseries: true)`. Requisitos principais: -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. +- Imutável: Entidades de série temporal são sempre imutáveis. +- Campos Obrigatórios: + - `id`: Deve ser do tipo `Int8!` e é automaticamente incrementada. + - `timestamp`: Deve ser do tipo `Timestamp!` e é automaticamente configurada no registro de data e hora do bloco. Exemplo: @@ -54,12 +55,12 @@ type Data @entity(timeseries: true) { } ``` -### Defining Aggregation Entities +### Como Definir Entidades de Agregação -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: +Uma entidade de agregação calcula valores agregados de uma fonte de série temporal. Ela é definida com a anotação `@aggregation`. Componentes principais: -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). +- Argumentos de Anotação: + - `intervals`: Especifica intervalos de tempo (por exemplo: `["hour", "day"]`). Exemplo: @@ -71,11 +72,11 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { } ``` -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. +Neste exemplo, o campo `Stats` ("Estatísticas") agrega o campo de preços de Data de hora em hora, diariamente, e computa a soma. 
-### Querying Aggregated Data +### Queries de Dados Agregados -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. +As agregações são expostas via campos de query que permitem filtragem e resgate com base em dimensões e intervalos de tempo. Exemplo: @@ -97,13 +98,13 @@ Exemplo: } ``` -### Using Dimensions in Aggregations +### Como Usar Dimensões em Agregações -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. +Dimensões são campos não agregados, usados ​​para agrupar pontos de dados. Elas permitem agregações com base em critérios específicos, como um token num aplicativo financeiro. Exemplo: -### Timeseries Entity +### Entidade de Série Temporal ```graphql type TokenData @entity(timeseries: true) { @@ -115,7 +116,7 @@ type TokenData @entity(timeseries: true) { } ``` -### Aggregation Entity with Dimension +### Entidade de Agregação com Dimensão ```graphql type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { @@ -128,67 +129,67 @@ type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { } ``` -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. +- Campo de Dimensão: `token` agrupa os dados, então os agregados são computados por token. +- Dados Agregados: + - totalVolume: Soma da quantia. + - priceUSD: último `priceUSD` (preço em dólares americanos) registrado. + - count: Contagem cumulativa dos registos. -### Aggregation Functions and Expressions +### Funções e Expressões de Agregação -Supported aggregation functions: +Funções de agregação apoiadas: -- sum -- count -- min -- max -- first -- last +- sum ("Soma") +- count ("Contagem") +- min ("Mínimo") +- max ("Máximo") +- first ("Primeiro") +- last ("Último") -### The arg in @aggregate can be +### O argumento em @aggregate pode ser -- A field name from the timeseries entity. -- An expression using fields and constants. +- Um nome de campo da entidade de série temporal. +- Uma expressão com campos e constantes. -### Examples of Aggregation Expressions +### Exemplos de Expressões de Agregação -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") +- Soma do Valor do Token: @aggregate(fn: "sum", arg: "priceUSD \_ amount") +- Quantia Positiva Máxima: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Soma Condicional: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. +Os operadores e funções suportados incluem aritmética básica (+, -, \_, /), operadores de comparação, operadores lógicos (`and`, `or`, `not`) e funções SQL como `least`, `greatest`, `coalesce`, etc. -### Query Parameters +### Parâmetros de Query -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). 
+- `interval`: Especifica intervalos de tempo (por exemplo: "hour").
+- `where`: Aplica filtros com base em dimensões e alcance de tempo.
+- `timestamp_gte` / `timestamp_lt`: Aplica filtros para início e fim de tempo (microssegundos desde o epoch).

-### Notes
+### Notas

-- Sorting: Results are automatically sorted by timestamp and id in descending order.
-- Current Data: An optional current argument can include the current, partially filled interval.
+- Classificação: Os resultados são automaticamente organizados por data e hora, e id, em ordem descendente.
+- Dados Atuais: Um argumento atual opcional pode incluir o intervalo corrente, parcialmente preenchido.

 ### Conclusão

-Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach:
+Implementar séries temporais e agregações em subgraphs é recomendado para projetos que lidam com dados baseados em tempo. Esta abordagem:

-- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead.
-- Simplifies Development: Eliminates the need for manual aggregation logic in mappings.
-- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness.
+- Melhora o Desempenho: Acelera a indexação e os queries ao reduzir a carga de processamento de dados.
+- Simplifica a Produção: Elimina a necessidade de lógica de agregação manual em mapeamentos.
+- Escala Eficientemente: Manuseia grandes quantias de dados sem comprometer a velocidade ou a capacidade de resposta.

-By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs.
+Ao adotar esse padrão, os programadores podem criar subgraphs mais eficientes e escaláveis, fornecendo acesso mais rápido e confiável de dados aos utilizadores finais. Para saber mais sobre como implementar séries temporais e agregações, consulte o [Leia-me sobre Séries Temporais e Agregações](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) e experimente esse recurso nos seus subgraphs.

-## Subgraph Best Practices 1-6
+## Melhores Práticas para um Subgraph 1 – 6

-1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+1. [Pruning: Reduza o Excesso de Dados do Seu Subgraph para Acelerar Queries](/subgraphs/cookbook/pruning/)

-2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+2. [Use o @derivedFrom para Melhorar a Resposta da Indexação e de Queries](/subgraphs/cookbook/derivedfrom/)

-3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+3. [Melhore o Desempenho da Indexação e de Queries com o Uso de Bytes como IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)

-4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
+4. [Evite `eth-calls` para Acelerar a Indexação](/subgraphs/cookbook/avoid-eth-calls/)

-5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/)
+5. [Simplifique e Otimize com Séries Temporais e Agregações](/subgraphs/cookbook/timeseries/)

-6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Lance Hotfixes Mais Rápido com Enxertos](/subgraphs/cookbook/grafting-hotfix/) From aeb652ef74ffe570af104815ba159c8ba56d9815 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:29 -0500 Subject: [PATCH 0793/1534] New translations timeseries.mdx (Russian) --- .../ru/subgraphs/cookbook/timeseries.mdx | 143 +++++++++--------- 1 file changed, 72 insertions(+), 71 deletions(-) diff --git a/website/src/pages/ru/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ru/subgraphs/cookbook/timeseries.mdx index a1fbbfc6ee87..26de01d5e27d 100644 --- a/website/src/pages/ru/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/ru/subgraphs/cookbook/timeseries.mdx @@ -1,48 +1,49 @@ --- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +title: Лучшие практики субграфов №5 — Упрощение и оптимизация с помощью временных рядов и агрегаций +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- -## TLDR +## Краткое содержание -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. +Использование новой функции временных рядов и агрегаций в субграфах может значительно улучшить как скорость индексирования, так и производительность запросов. ## Обзор -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. +Временные ряды и агрегации снижают накладные расходы на обработку данных и ускоряют запросы, перенося вычисления агрегаций на базу данных и упрощая код отображений. Этот подход особенно эффективен при обработке больших объемов данных, основанных на времени. -## Benefits of Timeseries and Aggregations +## Преимущества временных рядов и агрегаций -1. Improved Indexing Time +1. Улучшенное время индексирования -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. +- Меньше данных для загрузки: мэппинги обрабатывают меньше данных, поскольку необработанные данные хранятся в виде неизменяемых объектов временных рядов. +- Агрегации, управляемые базой данных: агрегации вычисляются автоматически базой данных, что снижает нагрузку на мэппинги. -2. Simplified Mapping Code +2. Упрощённый код мэппинга -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. +- Отсутствие ручных вычислений: разработчикам больше не нужно писать сложную логику агрегации в мэппингах. +- Снижение сложности: упрощает обслуживание кода и минимизирует вероятность ошибок. -3. Dramatically Faster Queries +3. Существенное ускорение запросов -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. +- Неизменяемые данные: все данные временных рядов неизменяемы, что обеспечивает эффективное хранение и извлечение. 
+- Эффективное разделение данных: агрегаты хранятся отдельно от необработанных данных временных рядов, что позволяет запросам обрабатывать значительно меньше данных — часто на несколько порядков меньше.
 
-### Important Considerations
+### Важные замечания
 
-- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing.
-- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors.
-- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster.
+- Неизменяемость данных: данные временных рядов не могут быть изменены после записи, что обеспечивает целостность данных и упрощает индексирование.
+- Автоматическое управление ID и метками времени: поля id и timestamp автоматически управляются graph-node, что снижает вероятность ошибок.
+- Эффективное хранение данных: разделение необработанных данных и агрегатов оптимизирует хранилище и ускоряет выполнение запросов.
 
-## How to Implement Timeseries and Aggregations
+## Как внедрить временные ряды и агрегации
 
-### Defining Timeseries Entities
+### Определение объектов временных рядов
 
-A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements:
+Объект временного ряда представляет собой необработанные данные, собранные с течением времени. Он определяется с помощью аннотации `@entity(timeseries: true)`. Ключевые требования:
 
-- Immutable: Timeseries entities are always immutable.
-- Mandatory Fields:
-  - `id`: Must be of type `Int8!` and is auto-incremented.
-  - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp.
+- Неизменяемость: объекты временного ряда всегда неизменяемы.
+- Обязательные поля:
+  - `id`: должен быть типа `Int8!` и автоматически увеличиваться.
+  - `timestamp`: должен быть типа `Timestamp!` и автоматически устанавливаться в соответствии с временной меткой блока.
 
 Пример:
 
@@ -54,12 +55,12 @@ type Data @entity(timeseries: true) {
 }
 ```
 
-### Defining Aggregation Entities
+### Определение объектов агрегаций
 
-An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components:
+Объект агрегации вычисляет агрегированные значения из источника данных временного ряда. Он определяется с помощью аннотации `@aggregation`. Ключевые компоненты:
 
-- Annotation Arguments:
-  - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`).
+- Аргументы аннотации:
+  - `intervals`: указывает временные интервалы (например, `["hour", "day"]`).
 
 Пример:
 
@@ -71,11 +72,11 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") {
 }
 ```
 
-In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum.
+В этом примере `Stats` агрегирует поле `price` из `Data` за часовые и дневные интервалы, вычисляя сумму.
 
-### Querying Aggregated Data
+### Запрос агрегированных данных
 
-Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals.
+Агрегации предоставляются через поля запросов, которые позволяют фильтровать и извлекать данные на основе измерений и временных интервалов.
 Пример:
 
@@ -97,13 +98,13 @@ Aggregations are exposed via query fields that allow filtering and retrieval bas
 }
 ```
 
-### Using Dimensions in Aggregations
+### Использование измерений в агрегациях
 
-Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application.
+Измерения — это неагрегированные поля, которые используются для группировки точек данных. Они позволяют выполнять агрегации на основе определённых критериев, таких как токен в финансовом приложении.
 
 Пример:
 
-### Timeseries Entity
+### Объект временного ряда
 
 ```graphql
 type TokenData @entity(timeseries: true) {
@@ -115,7 +116,7 @@ type TokenData @entity(timeseries: true) {
 }
 ```
 
-### Aggregation Entity with Dimension
+### Объект агрегации с измерением
 
 ```graphql
 type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") {
@@ -128,15 +129,15 @@ type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") {
 }
 ```
 
-- Dimension Field: token groups the data, so aggregates are computed per token.
-- Aggregates:
-  - totalVolume: Sum of amount.
-  - priceUSD: Last recorded priceUSD.
-  - count: Cumulative count of records.
+- Поле измерения: `token` группирует данные, так что агрегированные значения вычисляются для каждого токена.
+- Агрегаты:
+  - totalVolume: сумма количества.
+  - priceUSD: последняя зафиксированная цена в USD.
+  - count: кумулятивное количество записей.
 
-### Aggregation Functions and Expressions
+### Функции агрегации и выражения
 
-Supported aggregation functions:
+Поддерживаемые функции агрегации:
 
 - sum
 - count
@@ -145,50 +146,50 @@ Supported aggregation functions:
 - first
 - last
 
-### The arg in @aggregate can be
+### Аргумент в `@aggregate` может быть
 
-- A field name from the timeseries entity.
-- An expression using fields and constants.
+- Названием поля из объекта временных рядов.
+- Выражением, использующим поля и константы.
 
-### Examples of Aggregation Expressions
+### Примеры выражений агрегации
 
-- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount")
-- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)")
-- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end")
+- Сумма стоимости токена: @aggregate(fn: "sum", arg: "priceUSD \* amount")
+- Максимальное положительное значение суммы: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)")
+- Условная сумма: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end")
 
-Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc.
+Поддерживаемые операторы и функции включают базовую арифметику (+, -, \*, /), операторы сравнения, логические операторы (and, or, not), а также SQL-функции, такие как greatest, least, coalesce и другие.
 
-### Query Parameters
+### Параметры запроса
 
-- interval: Specifies the time interval (e.g., "hour").
-- where: Filters based on dimensions and timestamp ranges.
-- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch).
+- interval: указывает временной интервал (например, "hour").
+- where: фильтрует данные по измерениям и диапазонам временных меток.
+- timestamp_gte / timestamp_lt: фильтрует по времени начала и окончания (микросекунды с эпохи).
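+
+For example, a query that combines these parameters against the `Stats` aggregation defined earlier might look like the sketch below. It assumes the aggregation exposes a `sum` field, as in the example above, and the microsecond timestamps are illustrative values rather than data from a real deployment:
+
+```graphql
+{
+  # Daily aggregates from the Stats example, limited to an illustrative time window
+  stats(
+    interval: "day"
+    where: { timestamp_gte: 1704067200000000, timestamp_lt: 1706745600000000 }
+  ) {
+    id
+    timestamp
+    sum
+  }
+}
+```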
 
-### Notes
+### Примечания
 
-- Sorting: Results are automatically sorted by timestamp and id in descending order.
-- Current Data: An optional current argument can include the current, partially filled interval.
+- Сортировка: результаты автоматически сортируются по временным меткам и идентификатору в порядке убывания.
+- Текущие данные: необязательный аргумент `current` может включать текущий, частично заполненный интервал.
 
-### Conclusion
+### Заключение
 
-Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach:
+Внедрение временных рядов и агрегаций в субграфы является лучшей практикой для проектов, работающих с данными, зависящими от времени. Этот подход:
 
-- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead.
-- Simplifies Development: Eliminates the need for manual aggregation logic in mappings.
-- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness.
+- Улучшает производительность: ускоряет индексирование и запросы, снижая нагрузку на обработку данных.
+- Упрощает разработку: устраняет необходимость в ручном написании логики агрегации в мэппингах.
+- Эффективно масштабируется: обрабатывает большие объемы данных, не ухудшая скорость и отзывчивость.
 
-By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs.
+Применяя этот шаблон, разработчики могут создавать более эффективные и масштабируемые субграфы, обеспечивая более быстрый и надежный доступ к данным для конечных пользователей. Чтобы узнать больше о внедрении временных рядов и агрегаций, обратитесь к [Руководству по временным рядам и агрегациям](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) и рассмотрите возможность использования этой функции в своих субграфах.
 
-## Subgraph Best Practices 1-6
+## Лучшие практики для субграфов 1-6
 
-1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+1. [Увеличение скорости запросов с помощью обрезки субграфов](/subgraphs/cookbook/pruning/)
 
-2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+2. [Улучшение индексирования и отклика запросов с использованием @derivedFrom](/subgraphs/cookbook/derivedfrom/)
 
-3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+3. [Улучшение индексирования и производительности запросов с использованием неизменяемых объектов и байтов в качестве идентификаторов](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
 
-4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
+4. [Увеличение скорости индексирования путем избегания `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
 
-5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/)
+5. [Упрощение и оптимизация с помощью временных рядов и агрегаций](/subgraphs/cookbook/timeseries/)
 
-6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/)
+6. 
[Использование переноса (графтинга) для быстрого развертывания исправлений](/subgraphs/cookbook/grafting-hotfix/) From 7cd9102e4ae8a3947b973794361654d8aa8ab885 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:30 -0500 Subject: [PATCH 0794/1534] New translations timeseries.mdx (Swedish) --- website/src/pages/sv/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/sv/subgraphs/cookbook/timeseries.mdx b/website/src/pages/sv/subgraphs/cookbook/timeseries.mdx index 76678087aca2..0b380faea58b 100644 --- a/website/src/pages/sv/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From 56be5c2d3fe3765a712036aed870518440ba088a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:31 -0500 Subject: [PATCH 0795/1534] New translations timeseries.mdx (Turkish) --- .../tr/subgraphs/cookbook/timeseries.mdx | 155 +++++++++--------- 1 file changed, 78 insertions(+), 77 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/timeseries.mdx b/website/src/pages/tr/subgraphs/cookbook/timeseries.mdx index e7b242bfa2f3..4e23a34130d9 100644 --- a/website/src/pages/tr/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/timeseries.mdx @@ -1,48 +1,49 @@ --- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +title: Subgraph Örnek Uygulama 5 - Zaman serileri ve Toplulaştırma ile Basitleştirip Optimize Edin +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- -## TLDR +## Özet -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. +Subgraph'lerdeki yeni zaman serisi ve toplulaştırma özelliğini kullanmak, hem endeksleme hızını hem de sorgu performansını önemli ölçüde artırabilir. ## Genel Bakış -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. +Zaman serisi ve toplulaştırmalar, toplulaştırma hesaplamalarını veritabanına aktararak ve eşleme kodunu basitleştirerek, veri işleme yükünü azaltır ve sorguları hızlandırır. Bu yaklaşım, özellikle büyük hacimli zamana dayalı verilerle başa çıkarken etkilidir. -## Benefits of Timeseries and Aggregations +## Zaman Serisi ve Toplulaştırmaların Faydaları -1. Improved Indexing Time +1. İyileştirilmiş Endeksleme Süresi -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. +- Yüklenmesi Gereken Daha Az Veri: Ham veri noktaları değiştirilemez zaman serisi varlıkları olarak depolandığı için, eşlemeler daha az veri işler. +- Veritabanı Tarafından Yönetilen Toplulaştırmalar: Toplulaştırmalar otomatik olarak veritabanı tarafından hesaplanır, bu da eşlemeler üzerindeki yükü azaltır. -2. Simplified Mapping Code +2. Basitleştirilmiş Eşleme Kodu -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. 
-- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. +- Manuel Hesaplama Olmaması: Geliştiriciler artık eşlemelerde karmaşık toplulaştırma mantığı yazmak zorunda değildir. +- Azaltılmış Karmaşıklık: Kod bakımını basitleştirir ve hata olasılığını en aza indirir. -3. Dramatically Faster Queries +3. Çok Daha Hızlı Sorgular -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. +- Değişmez Veri: Tüm zaman serisi verileri değişmezdir. Bu da verimli depolama ve geri çağırmayı mümkün kılar. +- Verimli Veri Ayrımı: Toplulaştırmalar, ham zaman serisi verilerinden ayrı olarak depolanır, bu da sorguların önemli ölçüde —genellikle birkaç büyüklük derecesi— daha az veri işlemesine olanak tanır. -### Important Considerations +### Önemli Hususlar -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. +- Değişmez Veri: Zaman serisi verileri yazıldıktan sonra değiştirilemez, bu da veri bütünlüğünü sağlar ve endekslemeyi basitleştirir. +- Otomatik Kimlık Numarası ve Zaman Damgası Yönetimi: id (kimlik numarası) ve timestamp (zaman damgası) alanları graph-node tarafından otomatik olarak yönetilir, potansiyel hataları azaltır. +- Verimli Veri Depolama: Ham verileri toplulaştırmalardan ayırarak, depolama optimize edilir ve sorgular daha hızlı çalışır. -## How to Implement Timeseries and Aggregations +## Zaman Serisi ve Toplulaştırmaları Nasıl Uygulanır -### Defining Timeseries Entities +### Zaman Serisi Varlıklarının Tanımlanması -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: +Bir zaman serisi varlığı, zaman içinde toplanan ham veri noktalarını temsil eder. `@entity(timeseries: true)` notasyonu ile tanımlanır. Ana gereksinimler: -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. +- Değişmezlik: Zaman serisi varlıkları her zaman değişmezdir. +- Zorunlu Alanlar: + - `id`: `Int8!` türünde olmalı ve otomatik olarak artmalıdır. + - `timestamp`: `Timestamp!` türünde olmalı ve blok zaman damgasına otomatik olarak ayarlanmalıdır. Örnek: @@ -54,12 +55,12 @@ type Data @entity(timeseries: true) { } ``` -### Defining Aggregation Entities +### Toplulaştırma Varlıklarını Tanımlama -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: +Bir toplulaştırma varlığı, bir zaman serisi kaynağından toplulaştırılmış değerleri hesaplar. `@aggregation` ile tanımlanır. Ana bileşenler: -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). +- Açıklama Argümanları: + - `intervals`: Zaman aralıklarını belirtir (örneğin, `["hour", "day"]`). 
 Örnek:
 
@@ -71,11 +72,11 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") {
 }
 ```
 
-In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum.
+Bu örnekte Stats, Data'dan fiyat alanını saatlik ve günlük aralıklar üzerinden toplulaştırarak toplamı hesaplar.
 
-### Querying Aggregated Data
+### Toplulaştırılmış Verileri Sorgulama
 
-Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals.
+Toplulaştırmalar, boyut ve zaman aralıklarına göre filtreleme ve geri alma imkanı sağlayan sorgu alanları aracılığıyla dışarı sunulur.
 
 Örnek:
 
@@ -97,13 +98,13 @@ Aggregations are exposed via query fields that allow filtering and retrieval bas
 }
 ```
 
-### Using Dimensions in Aggregations
+### Toplulaştırmalarda Boyutların Kullanımı
 
-Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application.
+Boyutlar, veri noktalarını gruplamak için kullanılan toplulaştırılmamış alanlardır. Belirli kriterlere dayalı toplulaştırmalar sağlarlar. Mesela, bir finansal uygulamadaki belirli bir token'a ait toplulaştırmaları elde etmek için kullanılırlar.
 
 Örnek:
 
-### Timeseries Entity
+### Zaman Serisi Varlığı
 
 ```graphql
 type TokenData @entity(timeseries: true) {
@@ -115,7 +116,7 @@ type TokenData @entity(timeseries: true) {
 }
 ```
 
-### Aggregation Entity with Dimension
+### Boyut Kullanan Toplulaştırma Varlığı
 
 ```graphql
 type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") {
@@ -128,67 +129,67 @@ type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") {
 }
 ```
 
-- Dimension Field: token groups the data, so aggregates are computed per token.
-- Aggregates:
-  - totalVolume: Sum of amount.
-  - priceUSD: Last recorded priceUSD.
-  - count: Cumulative count of records.
+- Boyut Alanı: Token, verileri gruplar. Bu nedenle toplulaştırmalar her token için hesaplanır.
+- Toplulaştırmalar:
+  - totalVolume: Miktarın (amount alanının) toplamı.
+  - priceUSD: Kayıtlı son priceUSD.
+  - count: Kayıtların kümülatif sayısı.
 
-### Aggregation Functions and Expressions
+### Toplulaştırma Fonksiyonları ve İfadeleri
 
-Supported aggregation functions:
+Desteklenen toplulaştırma fonksiyonları:
 
-- sum
-- count
-- min
-- max
-- first
-- last
+- sum (toplama)
+- count (sayma)
+- min (asgari)
+- max (azami)
+- first (ilk)
+- last (son)
 
-### The arg in @aggregate can be
+### @aggregate içindeki arg (argüman) şunlar olabilir
 
-- A field name from the timeseries entity.
-- An expression using fields and constants.
+- Zaman serisi varlığındaki bir alan adı.
+- Alanlar ve sabit değerler kullanılarak oluşturulan bir ifade.
 
-### Examples of Aggregation Expressions
+### Toplulaştırma İfadeleri Örnekleri
 
-- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount")
-- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)")
-- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end")
+- Token Değerini Topla: @aggregate(fn: "sum", arg: "priceUSD \* amount")
+- Azami Pozitif Tutar: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)")
+- Koşullu Toplama: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end")
 
-Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc.
+Desteklenen operatörler ve fonksiyonlar arasında temel aritmetik (+, -, \*, /) operatörleri, karşılaştırma operatörleri, mantıksal operatörler (and, or, not) ve SQL fonksiyonları (örneğin greatest, least, coalesce) bulunur.
 
-### Query Parameters
+### Sorgu Parametreleri
 
-- interval: Specifies the time interval (e.g., "hour").
-- where: Filters based on dimensions and timestamp ranges.
-- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch).
+- interval: Zaman aralığını belirtir (ör., "hour").
+- where: Boyutlara ve zaman damgası aralıklarına göre filtreler.
+- timestamp_gte / timestamp_lt: Başlangıç ve bitiş zamanları için filtreler (dönem başlangıcından itibaren geçen mikrosaniye cinsinden).
 
-### Notes
+### Notlar
 
-- Sorting: Results are automatically sorted by timestamp and id in descending order.
-- Current Data: An optional current argument can include the current, partially filled interval.
+- Sıralama: Sonuçlar otomatik olarak zaman damgası ve id'ye göre azalan sırada sıralanır.
+- Mevcut Veriler: Opsiyonel `current` argümanı, mevcut, kısmen doldurulmuş aralığı içerebilir.
 
-### Conclusion
+### Sonuç
 
-Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach:
+Zaman serileri ve toplulaştırmaların subgraph'lerde uygulanması, zamana dayalı veri ile çalışan projeler için örnek bir uygulamadır. Bu yaklaşım:
 
-- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead.
-- Simplifies Development: Eliminates the need for manual aggregation logic in mappings.
-- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness.
+- Performansı Artırır: Veri işleme yükünü azaltarak endeksleme ve sorgulamayı hızlandırır.
+- Geliştirmeyi Basitleştirir: Eşlemelerde manuel toplulaştırma mantığı ihtiyacını ortadan kaldırır.
+- Verimli Ölçeklenir: Hız veya yanıt verme süresinden ödün vermeden büyük hacimli verileri işler.
 
-By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs.
+Bu modeli benimseyerek, geliştiriciler daha verimli ve ölçeklenebilir subgraph'ler oluşturabilir ve son kullanıcılara daha hızlı ve güvenilir veri erişimi sağlayabilirler. Zaman serileri ve toplulaştırmaların uygulanması hakkında daha fazla bilgi için [Zaman Serileri ve Toplulaştırmalar](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) belgesini inceleyin. Bu özelliği subgraph'lerinizde denemeyi de düşünebilirsiniz.
 
-## Subgraph Best Practices 1-6
+## Subgraph Örnek Uygulamalar 1-6
 
-1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+1. [Subgraph Budama ile Sorgu Hızını İyileştirin](/subgraphs/cookbook/pruning/)
 
-2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+2. [@derivedFrom Kullanarak Endeksleme ve Sorgu Yanıt Hızını Artırın](/subgraphs/cookbook/derivedfrom/)
 
-3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+3. 
[Değişmez Varlıklar ve Bytes ID'ler Kullanarak Endeksleme ve Sorgu Performansını Artırın](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Endeksleme Hızını `eth_calls`'den Kaçınarak İyileştirin](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Zaman Serileri ve Bütünleştirme ile Basitleştirin ve Optimize Edin](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Hızlı Düzeltme Dağıtımı için Aşılama Kullanın](/subgraphs/cookbook/grafting-hotfix/) From 6c4a0d0f2b11642d2e5e3a274a3e9a765c16a36c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:32 -0500 Subject: [PATCH 0796/1534] New translations timeseries.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/uk/subgraphs/cookbook/timeseries.mdx b/website/src/pages/uk/subgraphs/cookbook/timeseries.mdx index 0168be53d7ed..fdee00c65f2b 100644 --- a/website/src/pages/uk/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From e5a199dab8931bc2647d8dd10043575d973ec03b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:33 -0500 Subject: [PATCH 0797/1534] New translations timeseries.mdx (Chinese Simplified) --- website/src/pages/zh/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/zh/subgraphs/cookbook/timeseries.mdx b/website/src/pages/zh/subgraphs/cookbook/timeseries.mdx index 7ac1de481272..3f098c6bddcd 100644 --- a/website/src/pages/zh/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From f15dc42e62c71d741bc0e79515bee1796e102774 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:34 -0500 Subject: [PATCH 0798/1534] New translations timeseries.mdx (Urdu (Pakistan)) --- website/src/pages/ur/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/ur/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ur/subgraphs/cookbook/timeseries.mdx index 5049667a8d5c..aa255a266504 100644 --- a/website/src/pages/ur/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From 3ddaa0540a86d683b1863b0b8de2d13aad535ef6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:35 -0500 Subject: [PATCH 0799/1534] New translations timeseries.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/vi/subgraphs/cookbook/timeseries.mdx b/website/src/pages/vi/subgraphs/cookbook/timeseries.mdx 
index 4a55ac2bcd5a..9d93817fca20 100644 --- a/website/src/pages/vi/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From e1a7d9df8a8a2b91fa0d8d219e55c4cbd9316391 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:36 -0500 Subject: [PATCH 0800/1534] New translations timeseries.mdx (Marathi) --- website/src/pages/mr/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/mr/subgraphs/cookbook/timeseries.mdx b/website/src/pages/mr/subgraphs/cookbook/timeseries.mdx index aef1fe57ac5f..834d97a2d8d5 100644 --- a/website/src/pages/mr/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/mr/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From 69db61d72426a9fc8f9c2f857e7b5c81b873f194 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:37 -0500 Subject: [PATCH 0801/1534] New translations timeseries.mdx (Hindi) --- website/src/pages/hi/subgraphs/cookbook/timeseries.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/src/pages/hi/subgraphs/cookbook/timeseries.mdx b/website/src/pages/hi/subgraphs/cookbook/timeseries.mdx index 1e4dfb156048..2ea7ae692063 100644 --- a/website/src/pages/hi/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/hi/subgraphs/cookbook/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: "Subgraph Best Practice 5: Timeseries and Aggregations" --- ## TLDR From ce918c2575f05c1eb7a87ad730f5807aadc010d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:38 -0500 Subject: [PATCH 0802/1534] New translations transfer-to-the-graph.mdx (Romanian) --- .../src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx index 64c225442860..194deb018404 100644 --- a/website/src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ro/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. 
### Install the Graph CLI⁠ From a142e92239223af1e8ced147db7efc1c2769822b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:39 -0500 Subject: [PATCH 0803/1534] New translations transfer-to-the-graph.mdx (French) --- .../cookbook/transfer-to-the-graph.mdx | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/website/src/pages/fr/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/fr/subgraphs/cookbook/transfer-to-the-graph.mdx index 6078975a20a3..d34a88327c64 100644 --- a/website/src/pages/fr/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/fr/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -1,74 +1,74 @@ --- -title: Tranfer to The Graph +title: Transférer vers The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Mettez rapidement à jour vos subgraphs depuis n'importe quelle plateforme vers [le réseau décentralisé de The Graph](https://thegraph.com/networks/). -## Benefits of Switching to The Graph +## Avantages du passage à The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. -- Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Utilisez le même subgraph que vos applications utilisent déjà avec une migration sans interruption de service. +- Améliorez la fiabilité grâce à un réseau mondial pris en charge par plus de 100 Indexers. +- Bénéficiez d’un support ultra-rapide pour vos subgraphs 24/7, avec une équipe d’ingénieurs de garde. -## Upgrade Your Subgraph to The Graph in 3 Easy Steps +## Mettez à jour votre Subgraph vers The Graph en 3 étapes simples 1. [Set Up Your Studio Environment](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) 2. [Deploy Your Subgraph to Studio](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) 3. [Publish to The Graph Network](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) -## 1. Set Up Your Studio Environment +## 1. Configurer votre environnement Studio -### Create a Subgraph in Subgraph Studio +### Créer un subgraph dans Subgraph Studio -- Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Accédez à [Subgraph Studio](https://thegraph.com/studio/) et connectez votre portefeuille. +- Cliquez sur « Créer un subgraph ». Il est recommandé de nommer le subgraph en majuscule : « Nom du subgraph Nom de la chaîne ». -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. -### Install the Graph CLI⁠ +### Installer Graph CLI -You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm` or `pnpm`) installed to use the Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. +Vous devez avoir Node.js et un gestionnaire de paquets de votre choix (`npm` or `pnpm`) installés pour utiliser Graph CLI. 
Vérifiez la version la [plus récente](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) de CLI. -On your local machine, run the following command: +Sur votre machine locale, exécutez la commande suivante : -Using [npm](https://www.npmjs.com/): +Utilisation de [npm](https://www.npmjs.com/) : ```sh npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Utilisez la commande suivante pour créer un subgraph dans Studio en utilisant CLI : ```sh graph init --product subgraph-studio ``` -### Authenticate Your Subgraph +### Authentifiez votre subgraph -In The Graph CLI, use the auth command seen in Subgraph Studio: +Dans Graph CLI, utilisez la commande auth vue dans Subgraph Studio : ```sh graph auth ``` -## 2. Deploy Your Subgraph to Studio +## 2. Déployez votre Subgraph sur Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +Si vous avez votre code source, vous pouvez facilement le déployer sur Studio. Si vous ne l'avez pas, voici un moyen rapide de déployer votre subgraph. -In The Graph CLI, run the following command: +Dans Graph CLI, exécutez la commande suivante : ```sh graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Note:**: Chaque subgraph a un hash IPFS (ID de déploiement), qui ressemble à ceci : "Qmasdfad...". Pour déployer, utilisez simplement ce **hash IPFS**. Vous serez invité à entrer une version (par exemple, v0.0.1). -## 3. Publish Your Subgraph to The Graph Network +## 3. Publier votre Subgraph sur The Graph Network -![publish button](/img/publish-sub-transfer.png) +![bouton de publication](/img/publish-sub-transfer.png) -### Query Your Subgraph +### Interroger votre Subgraph > To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. @@ -76,29 +76,29 @@ You can start [querying](/subgraphs/querying/introduction/) any subgraph by send #### Exemple -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[Subgraph Ethereum CryptoPunks](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) par Messari: -![Query URL](/img/cryptopunks-screenshot-transfer.png) +![L'URL de requête](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +L'URL de requête pour ce subgraph est : ```sh -https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK +https://gateway-arbitrum.network.thegraph.com/api/`**votre-propre-clé-Api**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK ``` -Now, you simply need to fill in **your own API Key** to start sending GraphQL queries to this endpoint. +Maintenant, il vous suffit de remplir **votre propre clé API** pour commencer à envoyer des requêtes GraphQL à ce point de terminaison. 
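+
+Once the key is in place, a quick way to confirm that the endpoint responds is the `_meta` field that graph-node exposes on every subgraph — only standard fields are used in this sketch, no schema-specific names:
+
+```graphql
+{
+  # Returns the latest indexed block — a handy connectivity and freshness check
+  _meta {
+    block {
+      number
+    }
+    hasIndexingErrors
+  }
+}
+```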
-### Getting your own API Key +### Obtenir votre propre clé API -You can create API Keys in Subgraph Studio under the “API Keys” menu at the top of the page: +Vous pouvez créer des clés API dans Subgraph Studio sous le menu "API Keys" en haut de la page : -![API keys](/img/Api-keys-screenshot.png) +![clés API](/img/Api-keys-screenshot.png) -### Monitor Subgraph Status +### Surveiller l'état du Subgraph -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Une fois que vous avez mis à jour, vous pouvez accéder et gérer vos subgraphs dans [Subgraph Studio](https://thegraph.com/studio/) et explorer tous les subgraphs dans [The Graph Explorer](https://thegraph.com/networks/). -### Ressources additionnelles +### Ressources supplémentaires - To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- Pour explorer toutes les façons d'optimiser et de personnaliser votre subgraph pour de meilleures performances, lisez plus sur [la création d'un subgraph ici](/developing/creating-a-subgraph/). From 688ff4a7f5b878f8db04e61adcc062eaac4162e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:40 -0500 Subject: [PATCH 0804/1534] New translations transfer-to-the-graph.mdx (Spanish) --- .../src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx index 223800a19102..339032915f35 100644 --- a/website/src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/es/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From 53977b449189bea25c8b8e52586a67dc74edb0c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:41 -0500 Subject: [PATCH 0805/1534] New translations transfer-to-the-graph.mdx (Arabic) --- .../src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx index 375f82097493..f713ec3a5e76 100644 --- a/website/src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ar/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". 
It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -74,7 +74,7 @@ graph deploy --ipfs-hash You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. -#### مثال +#### Example [CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: From 83e6dfcda6010f28a3ef4919cdbbce1683de445a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:42 -0500 Subject: [PATCH 0806/1534] New translations transfer-to-the-graph.mdx (Czech) --- .../src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx index 261cc3e03b80..3e4f8eee8ccf 100644 --- a/website/src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/cs/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From 132ffb89c26f6f80d14f218fa74cb74c707ec9c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:43 -0500 Subject: [PATCH 0807/1534] New translations transfer-to-the-graph.mdx (German) --- .../pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx index d9966a91c24f..a97a3c618c03 100644 --- a/website/src/pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/de/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Gehen Sie zu [Subgraph Studio] (https://thegraph.com/studio/) und verbinden Sie Ihre Wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -74,7 +74,7 @@ graph deploy --ipfs-hash You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. 
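+
+As a sketch, the body of such a query could look like the following — the entity and field names here are hypothetical and must be replaced with names from the target subgraph's actual schema:
+
+```graphql
+{
+  # Hypothetical entity and fields — adjust to the subgraph's schema
+  tokens(first: 5) {
+    id
+  }
+}
+```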
-#### Example +#### Beispiel [CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: @@ -98,7 +98,7 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). -### Additional Resources +### Zusätzliche Ressourcen - To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). - To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). From d3af5185a912b10c33cb1cff39df43253eb49660 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:44 -0500 Subject: [PATCH 0808/1534] New translations transfer-to-the-graph.mdx (Italian) --- .../src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx index 0acdb716eef6..4c435d24f56c 100644 --- a/website/src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/it/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From 619b03d57120b80cc1316130fe3962080e8326f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:45 -0500 Subject: [PATCH 0809/1534] New translations transfer-to-the-graph.mdx (Japanese) --- .../src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx index 8ba1b5914dd9..6ef52284a5f5 100644 --- a/website/src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ja/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. 
### Install the Graph CLI⁠ From 18e051b21d1dc1100c986996e2c6079b32e7588c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:46 -0500 Subject: [PATCH 0810/1534] New translations transfer-to-the-graph.mdx (Korean) --- .../src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx index 64c225442860..194deb018404 100644 --- a/website/src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ko/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From 4e89d57ee41a4a50fdd8599cbeb5667d9e3cbaff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:47 -0500 Subject: [PATCH 0811/1534] New translations transfer-to-the-graph.mdx (Dutch) --- .../src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx index 64c225442860..194deb018404 100644 --- a/website/src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/nl/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From 3824d816db67fef2c5258106929b16e460ad2022 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:49 -0500 Subject: [PATCH 0812/1534] New translations transfer-to-the-graph.mdx (Polish) --- .../src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx index 64c225442860..194deb018404 100644 --- a/website/src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/pl/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". 
-> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From 8621ffdaf6f027cde5ad7e2190ce8835581d678d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:50 -0500 Subject: [PATCH 0813/1534] New translations transfer-to-the-graph.mdx (Portuguese) --- .../cookbook/transfer-to-the-graph.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/pt/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/pt/subgraphs/cookbook/transfer-to-the-graph.mdx index 0a0bac2dbc06..e5ad802a2941 100644 --- a/website/src/pages/pt/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/pt/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -1,35 +1,35 @@ --- -title: Tranfer to The Graph +title: Transfira-se para The Graph --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Migre rapidamente os seus subgraphs, de qualquer plataforma para a [rede descentralizada do The Graph](https://thegraph.com/networks/). -## Benefits of Switching to The Graph +## Vantagens de Trocar para The Graph -- Use the same subgraph that your apps already use with zero-downtime migration. -- Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Use o mesmo subgraph que os seus aplicativos já usam, com migração sem tempo de ócio. +- Aumenta a confiabilidade de uma rede global mantida por mais de 100 Indexadores. +- Receba suporte rápido para subgraphs, com uma equipa de engenharia de plantão disponível a todas as horas. -## Upgrade Your Subgraph to The Graph in 3 Easy Steps +## Atualize o Seu Subgraph para The Graph em 3 Etapas Fáceis -1. [Set Up Your Studio Environment](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) -2. [Deploy Your Subgraph to Studio](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) -3. [Publish to The Graph Network](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) +1. [Construa o Seu Ambiente do Studio](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) +2. [Implante o Seu Subgraph no Studio](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) +3. [Edite na The Graph Network](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) -## 1. Set Up Your Studio Environment +## Construa o Seu Ambiente do Studio -### Create a Subgraph in Subgraph Studio +### Como Criar um Subgraph no Subgraph Studio -- Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". +- Entre no [Subgraph Studio](https://thegraph.com/studio/) e conecte a sua carteira de criptomoedas. +- Clique em "Create a Subgraph" ("Criar um Subgraph"). É recomendado nomear o subgraph em caixa de título: por exemplo, "Nome De Subgraph Nome da Chain". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. 
+> Observação: após a edição, o nome do subgraph poderá ser editado, mas isto sempre exigirá uma ação on-chain, então pense bem no nome que irá dar.
 
-### Install the Graph CLI⁠
+### Instale a Graph CLI
 
-You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm` or `pnpm`) installed to use the Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version.
+É necessário ter [Node.js](https://nodejs.org/) e um gerenciador de pacotes da sua escolha (`npm` ou `pnpm`) instalados, para usar a Graph CLI. Verifique a versão [mais recente](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) da CLI.
 
-On your local machine, run the following command:
+Na sua máquina local, execute o seguinte comando:
 
 Uso de [npm](https://www.npmjs.com/):
 
@@ -37,68 +37,68 @@ Uso de [npm](https://www.npmjs.com/):
 npm install -g @graphprotocol/graph-cli@latest
 ```
 
-Use the following command to create a subgraph in Studio using the CLI:
+Use o comando a seguir para criar um subgraph no Studio com a CLI:
 
 ```sh
 graph init --product subgraph-studio
 ```
 
-### Authenticate Your Subgraph
+### Autentique o Seu Subgraph
 
-In The Graph CLI, use the auth command seen in Subgraph Studio:
+Na Graph CLI, use o comando `auth` visto no Subgraph Studio:
 
 ```sh
 graph auth
 ```
 
-## 2. Deploy Your Subgraph to Studio
+## 2. Implante o Seu Subgraph no Studio
 
-If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph.
+Se tiver o seu código-fonte, pode facilmente implantá-lo no Studio. Se não o tiver, aqui está uma maneira rápida de implantar o seu subgraph.
 
-In The Graph CLI, run the following command:
+Na The Graph CLI, execute o seguinte comando:
 
 ```sh
 graph deploy --ipfs-hash
 ```
 
-> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1).
+> **Observação:** Cada subgraph tem um hash IPFS (ID de Implantação), que se parece com isto: "Qmasdfad...". Para implantar, basta usar este **hash IPFS**. Aparecerá uma solicitação de versão (por exemplo, v0.0.1).
 
-## 3. Publish Your Subgraph to The Graph Network
+## 3. Edite o Seu Subgraph na The Graph Network
 
-![publish button](/img/publish-sub-transfer.png)
+![Botão de edição](/img/publish-sub-transfer.png)
 
-### Query Your Subgraph
+### Faça um Query
 
-> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph.
+> Para atrair cerca de 3 indexadores para fazer queries no seu subgraph, recomendamos curar pelo menos 3.000 GRT. Para saber mais sobre a curadoria, leia sobre [Curadoria](/resources/roles/curating/) no The Graph.
 
-You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio.
+Dá para começar a [fazer queries](/subgraphs/querying/introduction/) em qualquer subgraph enviando um query GraphQL para o ponto final da URL de query do subgraph, localizado na parte superior da página do Explorer no Subgraph Studio.
 #### Exemplo
 
-[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari:
+[Subgraph: CryptoPunks Ethereum](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) por Messari:
 
-![Query URL](/img/cryptopunks-screenshot-transfer.png)
+![URL de Query](/img/cryptopunks-screenshot-transfer.png)
 
-The query URL for this subgraph is:
+A URL de queries para este subgraph é:
 
 ```sh
-https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK
+https://gateway-arbitrum.network.thegraph.com/api/`**sua-chave-de-api**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK
 ```
 
-Now, you simply need to fill in **your own API Key** to start sending GraphQL queries to this endpoint.
+Agora, você só precisa preencher **sua própria chave de API** para começar a enviar queries GraphQL para este ponto final.
 
-### Getting your own API Key
+### Como adquirir sua própria Chave de API
 
-You can create API Keys in Subgraph Studio under the “API Keys” menu at the top of the page:
+É possível criar chaves de API no Subgraph Studio na aba “Chaves de API” na parte superior da página:
 
-![API keys](/img/Api-keys-screenshot.png)
+![Chaves de API](/img/Api-keys-screenshot.png)
 
-### Monitor Subgraph Status
+### Como Monitorar o Estado do Seu Subgraph
 
-Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/).
+Após a atualização, poderá acessar e gerir os seus subgraphs no [Subgraph Studio](https://thegraph.com/studio/) e explorar todos os subgraphs no [The Graph Explorer](https://thegraph.com/networks/).
 
 ### Outros Recursos
 
-- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/).
-- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/).
+- Para criar e editar um novo subgraph, veja o [Guia de Início Rápido](/subgraphs/quick-start/).
+- Para explorar todas as maneiras de otimizar e personalizar o seu subgraph para melhor desempenho, leia mais sobre [como criar um subgraph aqui](/developing/creating-a-subgraph/).

From f7435d556f77e21631bdd3fe5eca006f60a1bb2b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= 
Date: Fri, 14 Feb 2025 12:58:51 -0500
Subject: [PATCH 0814/1534] New translations transfer-to-the-graph.mdx (Russian)

---
 .../cookbook/transfer-to-the-graph.mdx        | 82 +++++++++----------
 1 file changed, 41 insertions(+), 41 deletions(-)

diff --git a/website/src/pages/ru/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ru/subgraphs/cookbook/transfer-to-the-graph.mdx
index 3b434108dbaa..570aab81debc 100644
--- a/website/src/pages/ru/subgraphs/cookbook/transfer-to-the-graph.mdx
+++ b/website/src/pages/ru/subgraphs/cookbook/transfer-to-the-graph.mdx
@@ -1,35 +1,35 @@
 ---
-title: Tranfer to The Graph
+title: Перенос в The Graph
 ---
 
-Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/).
+Быстро обновите свои субграфы с любой платформы до [децентрализованной сети The Graph](https://thegraph.com/networks/).
 
-## Benefits of Switching to The Graph
+## Преимущества перехода на The Graph
 
-- Use the same subgraph that your apps already use with zero-downtime migration.
-- Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Используйте тот же субграф, который уже используют ваши приложения, с миграцией без времени простоя. +- Повышайте надежность благодаря глобальной сети, поддерживаемой более чем 100 индексаторами. +- Получайте молниеносную поддержку для субграфов 24/7 от дежурной команды инженеров. -## Upgrade Your Subgraph to The Graph in 3 Easy Steps +## Обновите свой субграф до The Graph за 3 простых шага -1. [Set Up Your Studio Environment](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) -2. [Deploy Your Subgraph to Studio](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) -3. [Publish to The Graph Network](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) +1. [Настройте свою среду Studio](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) +2. [Разверните свой субграф в Studio](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) +3. [Опубликуйте в сети The Graph](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) -## 1. Set Up Your Studio Environment +## 1. Настройте свою среду в Studio -### Create a Subgraph in Subgraph Studio +### Создайте субграф в Subgraph Studio - Перейдите в [Subgraph Studio](https://thegraph.com/studio/) и подключите свой кошелек. - Нажмите "Создать субграф". Рекомендуется называть субграф с использованием Заглавного регистра: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Примечание: после публикации имя субграфа будет доступно для редактирования, но для этого каждый раз потребуется действие на он-чейне, поэтому выберите подходящее имя сразу. -### Install the Graph CLI⁠ +### Установите Graph CLI -You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm` or `pnpm`) installed to use the Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. +Для использования Graph CLI у Вас должны быть установлены [Node.js](https://nodejs.org/) и выбранный Вами менеджер пакетов (`npm` или `pnpm`). Проверьте [самую последнюю](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) версию CLI. -On your local machine, run the following command: +Выполните следующую команду на своем локальном компьютере: Использование [npm](https://www.npmjs.com/): @@ -37,68 +37,68 @@ On your local machine, run the following command: npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +Используйте следующую команду для создания субграфа в Studio с помощью CLI: ```sh graph init --product subgraph-studio ``` -### Authenticate Your Subgraph +### Аутентификация Вашего субграфа -In The Graph CLI, use the auth command seen in Subgraph Studio: +В Graph CLI используйте команду `auth`, как показано в Subgraph Studio: ```sh graph auth ``` -## 2. Deploy Your Subgraph to Studio +## 2. Разверните свой субграф в Studio -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. 
+Если у Вас есть исходный код, Вы можете с легкостью развернуть его в Studio. Если его нет, вот быстрый способ развернуть Ваш субграф. -In The Graph CLI, run the following command: +В Graph CLI выполните следующую команду: ```sh graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Примечание:** Каждый субграф имеет хэш IPFS (идентификатор развертывания), который выглядит так: "Qmasdfad...". Для развертывания просто используйте этот **IPFS хэш**. Вам будет предложено ввести версию (например, v0.0.1). -## 3. Publish Your Subgraph to The Graph Network +## 3. Опубликуйте свой субграф в The Graph Network -![publish button](/img/publish-sub-transfer.png) +![кнопка публикации](/img/publish-sub-transfer.png) -### Query Your Subgraph +### Запросите Ваш Субграф -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> Для того чтобы привлечь около 3 индексаторов для запроса Вашего субграфа, рекомендуется зафиксировать как минимум 3000 GRT. Чтобы узнать больше о кураторстве, ознакомьтесь с разделом [Кураторство](/resources/roles/curating/) на платформе The Graph. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +Вы можете начать [запрашивать](/subgraphs/querying/introduction/) любой субграф, отправив запрос GraphQL на конечную точку URL-адреса его запроса, которая расположена в верхней части страницы его эксплорера в Subgraph Studio. #### Пример -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +[Субграф CryptoPunks на Ethereum](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) от Messari: -![Query URL](/img/cryptopunks-screenshot-transfer.png) +![URL запроса](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +URL запроса для этого субграфа: ```sh -https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK +https://gateway-arbitrum.network.thegraph.com/api/`**Ваш-api-ключ**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK ``` -Now, you simply need to fill in **your own API Key** to start sending GraphQL queries to this endpoint. +Теперь Вам нужно просто вставить **Ваш собственный API-ключ**, чтобы начать отправлять GraphQL-запросы на эту конечную точку. -### Getting your own API Key +### Получение собственного API-ключа -You can create API Keys in Subgraph Studio under the “API Keys” menu at the top of the page: +Вы можете создать API-ключи в Subgraph Studio в меню «API Keys» в верхней части страницы: -![API keys](/img/Api-keys-screenshot.png) +![API ключи](/img/Api-keys-screenshot.png) -### Monitor Subgraph Status +### Мониторинг статуса субграфа -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). 
+После обновления Вы сможете получить доступ к своим субграфам и управлять ими в [Subgraph Studio](https://thegraph.com/studio/) и исследовать все субграфы в [The Graph Explorer](https://thegraph.com/networks/). -### Дополнительные источники +### Дополнительные ресурсы -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- Чтобы быстро создать и опубликовать новый субграф, ознакомьтесь с [Руководством по быстрому старту](/subgraphs/quick-start/). +- Чтобы исследовать все способы оптимизации и настройки своего субграфа для улучшения производительности, читайте больше о [создании субграфа здесь](/developing/creating-a-subgraph/). From 46ff20e1310630fcd87e5c88be8f8c32892156f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:52 -0500 Subject: [PATCH 0815/1534] New translations transfer-to-the-graph.mdx (Swedish) --- .../src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx index 6d9ebccab5c4..f06ed1722258 100644 --- a/website/src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/sv/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From 413c8d5429bde7c416ba636144adf5d702630dab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:53 -0500 Subject: [PATCH 0816/1534] New translations transfer-to-the-graph.mdx (Turkish) --- .../cookbook/transfer-to-the-graph.mdx | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/website/src/pages/tr/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/tr/subgraphs/cookbook/transfer-to-the-graph.mdx index 1cb8b2d6e9a9..a02f136958c2 100644 --- a/website/src/pages/tr/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/tr/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -1,35 +1,35 @@ --- -title: Tranfer to The Graph +title: The Graph'e Transfer --- -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). +Subgraph'lerinizi herhangi bir platformdan hızlıca [The Graph'in merkezi olmayan ağına](https://thegraph.com/networks/) yükseltin. -## Benefits of Switching to The Graph +## The Graph'e Geçmenin Avantajları -- Use the same subgraph that your apps already use with zero-downtime migration. -- Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. +- Uygulamalarınızın zaten kullandığı subgraph'i kesinti yaşamadan aynı şekilde kullanabilirsiniz. 
+- Yüzden fazla Endeksleyici tarafından desteklenip global bir ağdan gelen güvenilirliği artırabilirsiniz. +- Subgraph'ler için, her zaman yardıma hazır mühendislik ekibinden 7/24 yıldırım hızında destek alabilirsiniz. -## Upgrade Your Subgraph to The Graph in 3 Easy Steps +## Subgraph'inizi The Graph'e 3 Kolay Adımda Yükseltin -1. [Set Up Your Studio Environment](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) -2. [Deploy Your Subgraph to Studio](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) -3. [Publish to The Graph Network](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) +1. [Studio Ortamınızı Kurun](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) +2. [Subgraph'inizi Studio'ya Dağıtın](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) +3. [The Graph Ağı'nda Yayımlayın](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) -## 1. Set Up Your Studio Environment +## 1. Stüdyo Ortamınızı Ayarlayın -### Create a Subgraph in Subgraph Studio +### Subgraph Studio'da Bir Subgraph Oluştur - [Subgraph Studio](https://thegraph.com/studio/)'ya gidin ve cüzdanınızı bağlayın. - "Subgraph Oluştur" düğmesine tıklayın. Subgraph'in adını başlık formunda vermeniz önerilir: "Subgraph Adı Ağ Adı". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Not: Yayımladıktan sonra subgraph ismi değiştirilebilir, ancak bunu yapmak her seferinde zincir üzerinde işlem gerektirir. Bu yüzden isim verirken iyi düşünün. -### Install the Graph CLI⁠ +### Graph CLI'ı Yükle -You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm` or `pnpm`) installed to use the Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. +Graph CLI'ı kullanmak için [Node.js](https://nodejs.org/) ve tercih ettiğiniz bir paket yöneticisi (`npm` veya `pnpm`) kurulu olmalıdır. [En son](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI sürümünü kontrol edin. -On your local machine, run the following command: +Yerel makinenizde şu komutu çalıştırın: [npm](https://www.npmjs.com/) kullanarak: @@ -37,68 +37,68 @@ On your local machine, run the following command: npm install -g @graphprotocol/graph-cli@latest ``` -Use the following command to create a subgraph in Studio using the CLI: +CLI kullanarak Studio'da subgraph oluşturmak için aşağıdaki komutu kullanın: ```sh graph init --product subgraph-studio ``` -### Authenticate Your Subgraph +### Subgraph'inizi Doğrulayın -In The Graph CLI, use the auth command seen in Subgraph Studio: +The Graph CLI'da, Subgraph Studio'da görülen auth komutunu kullanın: ```sh -graph auth +graph auth ``` -## 2. Deploy Your Subgraph to Studio +## 2. Subgraph'inizi Studio'ya Dağıtın -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. +Kaynak kodunuz elinizdeyse kodunuzu Studio'ya kolayca dağıtabilirsiniz. Kaynak kodunuza sahip değilseniz, subgraph'inizi dağıtmanın hızlı yolunu aşağıda bulabilirsiniz. 
-In The Graph CLI, run the following command: +The Graph CLI'de aşağıdaki komutu çalıştırın: ```sh -graph deploy --ipfs-hash +graph deploy --ipfs-hash ``` -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). +> **Not:** Her subgraph'in bir IPFS hash değeri (Dağıtım Kimliği) vardır ve bu şekilde görünür: "Qmasdfad...". Dağıtmak için bu **IPFS hash'ini** kullanmanız yeterlidir. Sizden bir versiyon girmeniz istenecektir (örneğin, v0.0.1). -## 3. Publish Your Subgraph to The Graph Network +## 3. Subgraph'inizi The Graph Ağı'nda Yayımlayın -![publish button](/img/publish-sub-transfer.png) +![yayımla butonu](/img/publish-sub-transfer.png) -### Query Your Subgraph +### Subgraph'inizi Sorgulayın -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph. +> Subgraph'inizi sorgulamak için yaklaşık üç endeksleyici çekmek için, en az 3000 GRT ile kürasyon yapmanız önerilir. Kürasyon hakkında daha fazla bilgi için, The Graph üzerindeki [Kürasyon](/resources/roles/curating/) sayfasına göz atın. -You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. +Herhangi bir subgraph'i [sorgulamaya](/subgraphs/querying/introduction/) başlamak için, bir GraphQL sorgusunu subgraph’in sorgu URL uç noktasına gönderebilirsiniz. Bu uç nokta Subgraph Studio'daki Gezgin sayfasının üst kısmında bulunur. #### Örnek -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: +Messari tarafından hazırlanmış [CryptoPunks Ethereum subgraph'i](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK): -![Query URL](/img/cryptopunks-screenshot-transfer.png) +![Sorgu URL'si](/img/cryptopunks-screenshot-transfer.png) -The query URL for this subgraph is: +Bu subgraph için sorgu URL'si: ```sh -https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK +https://gateway-arbitrum.network.thegraph.com/api/`**kendi-api-anahtarınız**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK ``` -Now, you simply need to fill in **your own API Key** to start sending GraphQL queries to this endpoint. +Artık, bu uç noktaya GraphQL sorguları göndermeye başlamak için **kendi API Anahtarınızı** girmeniz yeterlidir. -### Getting your own API Key +### Kendi API Anahtarınızı Almak -You can create API Keys in Subgraph Studio under the “API Keys” menu at the top of the page: +API Anahtarlarını Subgraph Studio'da sayfanın üst kısmındaki “API Anahtarları” menüsünden oluşturabilirsiniz: -![API keys](/img/Api-keys-screenshot.png) +![API anahtarları](/img/Api-keys-screenshot.png) -### Monitor Subgraph Status +### Subgraph Durumunu İzle -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). +Yükseltme yaptıktan sonra, [Subgraph Studio](https://thegraph.com/studio/) üzerinde subgraph'lerinize erişip onları yönetebilirsiniz. 
Ayrıca, [The Graph Gezgini](https://thegraph.com/networks/) içindeki tüm subgraph'leri burada keşfedebilirsiniz. ### Ek Kaynaklar -- To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). +- Hızlı bir şekilde yeni bir subgraph oluşturmak ve yayımlamak için [Hızlı Başlangıç](/subgraphs/quick-start/) bölümüne göz atın. +- Daha iyi bir performans için subgraph'inizi optimize etmenin ve özelleştirmenin tüm yollarını keşfetmek için [subgraph oluşturma](/developing/creating-a-subgraph/) hakkında daha fazla okuyun. From 58d3ea9b85e7aa1583db1b232dabb6f365e4b932 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:54 -0500 Subject: [PATCH 0817/1534] New translations transfer-to-the-graph.mdx (Ukrainian) --- .../src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx index 64c225442860..aed61c2c695b 100644 --- a/website/src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/uk/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ @@ -98,7 +98,7 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). -### Additional Resources +### Додаткові матеріали - To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/). - To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). From bee28ab3db450d9956c679a2ed5cb9166a8612ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:55 -0500 Subject: [PATCH 0818/1534] New translations transfer-to-the-graph.mdx (Chinese Simplified) --- .../src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx index f5b0745282c9..497dc254ddf1 100644 --- a/website/src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/zh/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". 
It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From b50269958de1ec80b2c9a813da589bb72d520dd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:56 -0500 Subject: [PATCH 0819/1534] New translations transfer-to-the-graph.mdx (Urdu (Pakistan)) --- .../src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx index 4930c7916184..43fd50c14672 100644 --- a/website/src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/ur/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. ### Install the Graph CLI⁠ From f01497fee6a3d1fd2ecdaf556a0027fcb937e08a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:58:57 -0500 Subject: [PATCH 0820/1534] New translations transfer-to-the-graph.mdx (Vietnamese) --- .../src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx index 70e2afcb503c..78493fe55b06 100644 --- a/website/src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx +++ b/website/src/pages/vi/subgraphs/cookbook/transfer-to-the-graph.mdx @@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. +> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly. 
 ### Install the Graph CLI⁠

From ce49a59de53dd7cad46eb06bb5ce674bc87c0332 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= 
Date: Fri, 14 Feb 2025 12:58:58 -0500
Subject: [PATCH 0822/1534] New translations transfer-to-the-graph.mdx (Marathi)

---
 .../src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx
index 08ea42476c1a..d31f9d8864b5 100644
--- a/website/src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx
+++ b/website/src/pages/mr/subgraphs/cookbook/transfer-to-the-graph.mdx
@@ -23,7 +23,7 @@ Quickly upgrade your subgraphs from any platform to [The Graph's decentralized n
 - Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet.
 - Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name".
 
-> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly.
+> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly.
 
 ### Install the Graph CLI⁠
 
@@ -98,7 +98,7 @@ You can create API Keys in Subgraph Studio under the “API Keys” menu at the
 
 Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/).
 
-### Additional Resources
+### अतिरिक्त संसाधने
 
 - To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/).
 - To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/).

From 06457d7df6ee798b80511dddfb8628b87d87569c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= 
Date: Fri, 14 Feb 2025 12:58:59 -0500
Subject: [PATCH 0823/1534] New translations transfer-to-the-graph.mdx (Hindi)

---
 .../cookbook/transfer-to-the-graph.mdx        | 62 +++++++++----------
 1 file changed, 31 insertions(+), 31 deletions(-)

diff --git a/website/src/pages/hi/subgraphs/cookbook/transfer-to-the-graph.mdx b/website/src/pages/hi/subgraphs/cookbook/transfer-to-the-graph.mdx
index 3e99667ab7cc..ae5023b492a4 100644
--- a/website/src/pages/hi/subgraphs/cookbook/transfer-to-the-graph.mdx
+++ b/website/src/pages/hi/subgraphs/cookbook/transfer-to-the-graph.mdx
@@ -1,35 +1,35 @@
 ---
-title: Tranfer to The Graph
+title: The Graph पर ट्रांसफर करें
 ---
 
-Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/).
+अपने subgraphs को किसी भी प्लेटफ़ॉर्म से [The Graph's decentralized network](https://thegraph.com/networks/) में जल्दी से अपग्रेड करें।
 
-## Benefits of Switching to The Graph
+## The Graph पर स्विच करने के लाभ
 
-- Use the same subgraph that your apps already use with zero-downtime migration.
-- Increase reliability from a global network supported by 100+ Indexers.
-- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team.
+- आपके ऐप्स द्वारा पहले से उपयोग किए जा रहे वही subgraph को बिना किसी डाउनटाइम के माइग्रेशन के लिए उपयोग करें।
+- 100+ Indexers द्वारा समर्थित एक वैश्विक नेटवर्क से विश्वसनीयता बढ़ाएं।
+- सबग्राफ के लिए 24/7 तेज़ और तुरंत समर्थन प्राप्त करें, एक ऑन-कॉल इंजीनियरिंग टीम के साथ।
 
-## Upgrade Your Subgraph to The Graph in 3 Easy Steps
+## अपने Subgraph को The Graph में 3 आसान कदमों में अपग्रेड करें
 
 1. [Set Up Your Studio Environment](/subgraphs/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment)
 2. [Deploy Your Subgraph to Studio](/subgraphs/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio)
 3. [Publish to The Graph Network](/subgraphs/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network)
 
-## 1. Set Up Your Studio Environment
+## 1. अपने स्टूडियो वातावरण को सेट करें
 
-### Create a Subgraph in Subgraph Studio
+### सबग्राफ बनाएँ Subgraph Studio में
 
 - [Subgraph Studio](https://thegraph.com/studio/) पर जाएँ और अपने वॉलेट को कनेक्ट करें।
 - "एक सबग्राफ बनाएं" पर क्लिक करें। सबग्राफ का नाम टाइटल केस में रखना recommended है: "सबग्राफ नाम चेन नाम"।
 
-> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly.
+> Note: After publishing, the subgraph name will be editable but requires onchain action each time, so name it properly.
 
-### Install the Graph CLI⁠
+### Graph CLI स्थापित करें
 
-You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm` or `pnpm`) installed to use the Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version.
+आपको [Node.js](https://nodejs.org/) और अपनी पसंद का पैकेज मैनेजर (npm या pnpm) इंस्टॉल करना होगा ताकि आप Graph CLI का उपयोग कर सकें। [सबसे हालिया](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI संस्करण चेक करें।
 
-On your local machine, run the following command:
+अपने लोकल मशीन पर, निम्नलिखित कमांड चलाएँ:
 
 Using [npm](https://www.npmjs.com/):
 
@@ -37,38 +37,38 @@ Using [npm](https://www.npmjs.com/):
 npm install -g @graphprotocol/graph-cli@latest
 ```
 
-Use the following command to create a subgraph in Studio using the CLI:
+इस कमांड का उपयोग करें और CLI का उपयोग करके Studio में एक subgraph बनाएँ:
 
 ```sh
 graph init --product subgraph-studio
 ```
 
-### Authenticate Your Subgraph
+### अपने Subgraph को प्रमाणित करें
 
-In The Graph CLI, use the auth command seen in Subgraph Studio:
+The Graph CLI में, 'auth' कमांड का उपयोग करें जो Subgraph Studio में देखा गया है:
 
 ```sh
 graph auth
 ```
 
-## 2. Deploy Your Subgraph to Studio
+## 2. अपने Subgraph को Studio पर डिप्लॉय करें
 
-If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph.
+यदि आपके पास अपना सोर्स कोड है, तो आप इसे आसानी से Studio में डिप्लॉय कर सकते हैं। यदि आपके पास यह नहीं है, तो यहां एक त्वरित तरीका है अपनी subgraph को डिप्लॉय करने का।
 
-In The Graph CLI, run the following command:
+The Graph CLI में, निम्नलिखित कमांड चलाएँ:
 
 ```sh
 graph deploy --ipfs-hash
 ```
 
-> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1).
+> नोट: प्रत्येक subgraph का एक IPFS हैश (Deployment ID) होता है, जो इस प्रकार दिखता है: "Qmasdfad...".
बस इसे deploy करने के लिए इस IPFS हैश का उपयोग करें। आपको एक संस्करण दर्ज करने के लिए कहा जाएगा (जैसे, v0.0.1)।
 
-## 3. Publish Your Subgraph to The Graph Network
+## 3. अपने Subgraph को The Graph Network पर प्रकाशित करें
 
-![publish button](/img/publish-sub-transfer.png)
+![पब्लिश बटन](/img/publish-sub-transfer.png)
 
-### Query Your Subgraph
+### अपने Subgraph को क्वेरी करें
 
 > To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/resources/roles/curating/) on The Graph.
 
 You can start [querying](/subgraphs/querying/introduction/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio.
 
 #### Example
 
 [CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari:
 
 ![Query URL](/img/cryptopunks-screenshot-transfer.png)
 
-The query URL for this subgraph is:
+इस subgraph का क्वेरी URL है:
 
 ```sh
 https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK
 ```
 
-Now, you simply need to fill in **your own API Key** to start sending GraphQL queries to this endpoint.
+अब, आपको केवल अपना API Key भरने की आवश्यकता है ताकि आप इस endpoint पर GraphQL queries भेज सकें।
 
-### Getting your own API Key
+### अपनी खुद की API Key प्राप्त करना
 
-You can create API Keys in Subgraph Studio under the “API Keys” menu at the top of the page:
+आप Subgraph Studio में पृष्ठ के शीर्ष पर “API Keys” मेनू के तहत API Keys बना सकते हैं:
 
 ![API keys](/img/Api-keys-screenshot.png)
 
-### Monitor Subgraph Status
+### सबग्राफ की स्थिति की निगरानी करें
 
-Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/).
+एक बार जब आप अपग्रेड करते हैं, तो आप [Subgraph Studio](https://thegraph.com/studio/) में अपने सबग्राफ्स को एक्सेस और प्रबंधित कर सकते हैं और [The Graph Explorer](https://thegraph.com/networks/) में सभी सबग्राफ्स को एक्सप्लोर कर सकते हैं।
 
-### अतिरिक्त संसाधन
+### Additional Resources
 
 - To quickly create and publish a new subgraph, check out the [Quick Start](/subgraphs/quick-start/).
-- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/).
+- आप अपने subgraph के प्रदर्शन को बेहतर बनाने के लिए इसे अनुकूलित और कस्टमाइज़ करने के सभी तरीकों का पता लगाने के लिए, [creating a subgraph here](/developing/creating-a-subgraph/) पर और पढ़ें।

From 7e832ad8d9c1c177cadf558f0cc20cb3c34fb49b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= 
Date: Fri, 14 Feb 2025 12:59:01 -0500
Subject: [PATCH 0824/1534] New translations assemblyscript-mappings.mdx (French)

---
 .../creating/assemblyscript-mappings.mdx      | 32 +++++++++----------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/website/src/pages/fr/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/fr/subgraphs/developing/creating/assemblyscript-mappings.mdx
index 22d69907dbd2..7bb87fa69ab6 100644
--- a/website/src/pages/fr/subgraphs/developing/creating/assemblyscript-mappings.mdx
+++ b/website/src/pages/fr/subgraphs/developing/creating/assemblyscript-mappings.mdx
@@ -1,16 +1,16 @@
 ---
-title: Writing AssemblyScript Mappings
+title: Écrire des mappages en AssemblyScript
 ---
 
 ## Aperçu
 
-The mappings take data from a particular source and transform it into entities that are defined within your schema.
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. +Les mappages prennent des données d'une source particulière et les transforment en entités définies dans votre schéma. Les mappages sont écrits dans un sous-ensemble de [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) appelé [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) qui peut être compilé en WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript est plus strict que le TypeScript normal, tout en offrant une syntaxe familière. ## Écriture de mappages -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. +Pour chaque gestionnaire d'événements défini dans `subgraph.yaml` sous `mapping.eventHandlers`, créez une fonction exportée du même nom. Chaque gestionnaire doit accepter un seul paramètre appelé `event` avec un type correspondant au nom de l'événement traité. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +Dans le subgraph d'exemple, `src/mapping.ts` contient des gestionnaires pour les événements `NewGravatar` et `UpdatedGravatar`: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -37,30 +37,30 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. +Le premier gestionnaire prend un événement `NewGravatar` et crée une nouvelle entité Gravatar avec `new Gravatar(event.params.id.toHex())`, en remplissant les champs de l'entité à l'aide des paramètres de l'événement correspondant. Cette instance d'entité est représentée par la variable `gravatar`, avec une valeur d'id de `event.params.id.toHex()`. -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. +Le deuxième gestionnaire tente de charger le `Gravatar` existant à partir du store Graph Node. S'il n'existe pas encore, il est créé à la demande. L'entité est ensuite mise à jour pour correspondre aux nouveaux paramètres de l'événement avant d'être sauvegardée dans le store à l'aide de `gravatar.save()`. ### ID recommandés pour la création de nouvelles entités -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. 
+Il est fortement recommandé d'utiliser `Bytes` comme type pour les champs `id`, et de n'utiliser `String` que pour les attributs qui contiennent vraiment du texte lisible par l'homme, comme le nom d'un jeton. Voici quelques valeurs d'`id` recommandées à prendre en compte lors de la création de nouvelles entités.
 
 - `transfer.id = event.transaction.hash`
 
 - `let id = event.transaction.hash.concatI32(event.logIndex.toI32())`
 
-- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like
+- Pour les entités qui stockent des données agrégées, par exemple les volumes d'échanges quotidiens, l'`id` contient généralement le numéro du jour. Dans ce cas, l'utilisation d'un `Bytes` comme `id` est bénéfique. La détermination de l'`id` se fait de la manière suivante
 
 ```typescript
 let dayID = event.block.timestamp.toI32() / 86400
 let id = Bytes.fromI32(dayID)
 ```
 
-- Convert constant addresses to `Bytes`.
+- Convertir les adresses constantes en `Bytes`.
 
 `const id = Bytes.fromHexString('0xdead...beef')`
 
-There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`.
+Il existe une [Library Graph Typescript](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) qui contient des utilitaires pour interagir avec le store Graph Node et des commodités pour gérer les données et les entités des contrats intelligents. Elle peut être importée dans `mapping.ts` depuis `@graphprotocol/graph-ts`.
 
 ### Traitement des entités ayant des identifiants identiques
 
@@ -77,10 +77,10 @@ Afin de faciliter et de sécuriser le travail avec les contrats intelligents, le
 Cela se fait avec
 
 ```sh
 graph codegen [--output-dir ] []
 ```
 
-but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same:
+mais dans la plupart des cas, les subgraphs sont déjà préconfigurés via `package.json` pour vous permettre d'exécuter simplement l'un des éléments suivants pour obtenir le même résultat :
 
 ```sh
 # Yarn
 yarn codegen
 
 # NPM
 npm run codegen
 ```
 
-This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with.
+Cela va générer une classe AssemblyScript pour chaque contrat intelligent dans les fichiers ABI mentionnés dans `subgraph.yaml`, vous permettant de lier ces contrats à des adresses spécifiques dans les mappages et d'appeler des méthodes de contrat en lecture seule sur le bloc en cours de traitement. Il génère également une classe pour chaque événement de contrat afin de fournir un accès facile aux paramètres de l'événement, ainsi qu'au bloc et à la transaction d'où provient l'événement.
Tous ces types sont écrits dans `//.ts`. Dans l'exemple du subgraph, ce serait `generated/Gravity/Gravity.ts`, permettant aux mappages d'importer ces types avec. ```javascript import { @@ -102,12 +102,12 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +En outre, une classe est générée pour chaque type d'entité dans le schéma GraphQL du subgraph. Ces classes fournissent un chargement sécurisé des entités, un accès en lecture et en écriture aux champs des entités ainsi qu'une méthode `save()` pour écrire les entités dans le store. Toutes les classes d'entités sont écrites dans le fichier `/schema.ts`, ce qui permet aux mappages de les importer avec la commande ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Note:** La génération de code doit être exécutée à nouveau après chaque modification du schéma GraphQL ou des ABIs incluses dans le manifeste. Elle doit également être effectuée au moins une fois avant de construire ou de déployer le subgraphs. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +La génération de code ne vérifie pas votre code de mappage dans `src/mapping.ts`. Si vous souhaitez vérifier cela avant d'essayer de déployer votre subgraph sur Graph Explorer, vous pouvez exécuter `yarn build` et corriger les erreurs de syntaxe que le compilateur TypeScript pourrait trouver. From 26428b1bf2602572d0fc04db18bee49dd8a85538 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:03 -0500 Subject: [PATCH 0824/1534] New translations assemblyscript-mappings.mdx (German) --- .../subgraphs/developing/creating/assemblyscript-mappings.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/de/subgraphs/developing/creating/assemblyscript-mappings.mdx index 2ac894695fe1..4354181a33df 100644 --- a/website/src/pages/de/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -2,7 +2,7 @@ title: Writing AssemblyScript Mappings --- -## Overview +## Überblick The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. 
From 1cdb63c223492d60aa062a30364962eed6059453 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:08 -0500 Subject: [PATCH 0825/1534] New translations assemblyscript-mappings.mdx (Swedish) --- .../subgraphs/developing/creating/assemblyscript-mappings.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/assemblyscript-mappings.mdx b/website/src/pages/sv/subgraphs/developing/creating/assemblyscript-mappings.mdx index 259ae147af9f..646645e79ccc 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/assemblyscript-mappings.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/assemblyscript-mappings.mdx @@ -99,13 +99,13 @@ import { // The events classes: NewGravatar, UpdatedGravatar, -} from '../generated/Gravity/Gravity' +} from "../generated/Gravity/Gravity"; ``` In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with ```javascript -import { Gravatar } from '../generated/schema' +import { Gravatar } from "../generated/schema" ``` > **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. From fa2aa46625a30075b0c7ee3f4604767e75e03b7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:12 -0500 Subject: [PATCH 0826/1534] New translations install-the-cli.mdx (Romanian) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ro/subgraphs/developing/creating/install-the-cli.mdx index a60f181e9a50..7d44ff8b2ee6 100644 --- a/website/src/pages/ro/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The ABI file(s) must match your contract(s). There are a few ways to obtain ABI ## SpecVersion Releases -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. 
| +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 015a510ac65fdf9f463a3d36eccc934a43abcd65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:13 -0500 Subject: [PATCH 0827/1534] New translations install-the-cli.mdx (French) --- .../developing/creating/install-the-cli.mdx | 82 +++++++++---------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/fr/subgraphs/developing/creating/install-the-cli.mdx index bf710d6b1844..78f830c17c18 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/install-the-cli.mdx @@ -2,33 +2,33 @@ title: Installation du Graph CLI --- -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/subgraphs/developing/deploying/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). +> Pour utiliser votre subgraph sur le réseau décentralisé de The Graph, vous devrez [créer une clé API](/subgraphs/developing/deploying/subgraph-studio-faq/#2-how-do-i-create-an-api-key) dans [Subgraph Studio](https://thegraph.com/studio/apikeys/). Il est recommandé d'ajouter un signal à votre subgraph avec au moins 3 000 GRT pour attirer 2 à 3 Indexeurs. Pour en savoir plus sur la signalisation, consultez [curation](/resources/roles/curating/). ## Aperçu -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. +[Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) est une interface de ligne de commande qui facilite les commandes des développeurs pour The Graph. 
Il traite un [manifeste de subgraph](/subgraphs/developing/creating/subgraph-manifest/) et compile les [mappages](/subgraphs/developing/creating/assemblyscript-mappings/) pour créer les fichiers dont vous aurez besoin pour déployer le subgraph sur [Subgraph Studio](https://thegraph.com/studio/) et le réseau. -## Démarrage +## Introduction ### Installation du Graph CLI -The Graph CLI is written in TypeScript, and you must have `node` and either `npm` or `yarn` installed to use it. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. +L'interface de ligne de commande Graph est écrite en TypeScript et vous devez avoir installé `node` et soit `npm` soit `yarn` pour l'utiliser. Recherchez la version [la plus récente](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) de l'interface de ligne de commande. Sur votre machine locale, exécutez l'une des commandes suivantes : -#### Using [npm](https://www.npmjs.com/) +#### En utilisant [npm](https://www.npmjs.com/) ```bash npm install -g @graphprotocol/graph-cli@latest ``` -#### Using [yarn](https://yarnpkg.com/) +#### En utilisant [yarn](https://yarnpkg.com/) ```bash -npm install -g @graphprotocol/graph-cli +yarn global add @graphprotocol/graph-cli ``` -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. +La commande `graph init` peut être utilisée pour configurer un nouveau projet de subgraph, soit à partir d'un contrat existant, soit à partir d'un exemple de subgraph. Si vous avez déjà déployé un contrat intelligent sur votre réseau préféré, vous pouvez démarrer un nouveau subgraph à partir de ce contrat pour commencer. ## Créer un subgraph @@ -39,10 +39,10 @@ La commande suivante crée un subgraph qui indexe tous les événements d'un con ```sh graph init \ --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] + --from-contract \ + [--network ] \ + [--abi ] \ + [] ``` - La commande tente de récupérer l'ABI du contrat depuis Etherscan. @@ -51,7 +51,7 @@ graph init \ - Si certains arguments optionnels manquent, il vous guide à travers un formulaire interactif. -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. +- Le `` est l'ID de votre subgraph dans [Subgraph Studio](https://thegraph.com/studio/). Il se trouve sur la page de détails de votre subgraph. ### À partir d'un exemple de subgraph @@ -61,59 +61,59 @@ La commande suivante initialise un nouveau projet à partir d'un exemple de subg graph init --from-example=example-subgraph ``` -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. +- Le [subgraph d'exemple](https://github.com/graphprotocol/example-subgraph) est basé sur le contrat Gravity de Dani Grant, qui gère les avatars des utilisateurs et émet des événements `NewGravatar` ou `UpdateGravatar` chaque fois que des avatars sont créés ou mis à jour. -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
+- Le subgraph gère ces événements en écrivant des entités `Gravatar` dans le store de Graph Node et en veillant à ce qu'elles soient mises à jour en fonction des événements.

-### Add New `dataSources` to an Existing Subgraph
+### Ajouter de nouvelles `sources de données` à un subgraph existant

-`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them.
+Les `dataSources` sont des composants clés des subgraphs. Ils définissent les sources de données que le subgraph indexe et traite. Une `dataSource` spécifie quel smart contract doit être écouté, quels événements doivent être traités et comment les traiter.

-Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command:
+Les versions récentes de Graph CLI permettent d'ajouter de nouvelles `dataSources` à un subgraph existant grâce à la commande `graph add` :

```sh
graph add <address> [<subgraph-path>]
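# Esquisse d'utilisation — les valeurs ci-dessous (adresse du contrat, chemin du
# manifeste) sont purement hypothétiques et dépendent de votre projet ; les
# options utilisées sont celles documentées ci-dessous :
#
#   graph add 0x2E645469f354BB4F5c8a05B3b30A929361cf77eC ./subgraph.yaml \
#     --contract-name Gravity \
#     --merge-entities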
    [] Options: - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") + --abi Chemin d'accès à l'ABI du contrat (par défaut : téléchargement à partir d'Etherscan) + --contract-name Nom du contrat (par défaut : Contrat) + --merge-entities Fusionner ou non les entités portant le même nom (par défaut : false) + --network-file Chemin du fichier de configuration du réseau (par défaut: "./networks.json") ``` #### Spécificités⁠ -The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option) and creates a new `dataSource`, similar to how the `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. This allows you to index implementation contracts from their proxy contracts. +La commande `graph add` récupère l'ABI depuis Etherscan (à moins qu'un chemin ABI ne soit spécifié avec l'option `--abi`) et crée une nouvelle `dataSource`, de la même manière que la commande `graph init` crée une `dataSource` `--from-contract`, en mettant à jour le schéma et les mappages en conséquence. Cela vous permet d'indexer les contrats d'implémentation à partir de leurs contrats proxy. -- The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: +- L'option `--merge-entities` identifie comment le développeur souhaite gérer les conflits de noms entre `entity` et `event` : - - If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. + - Si `true` : la nouvelle `dataSource` doit utiliser les `eventHandlers` et `entities` existants. - - If `false`: a new `entity` & `event` handler should be created with `${dataSourceName}{EventName}`. + - Si `false` : un nouveau gestionnaire `entity` & `event` doit être créé avec `${dataSourceName}{EventName}`. -- The contract `address` will be written to the `networks.json` for the relevant network. +- L'adresse du contrat sera écrite dans le fichier `networks.json` pour le réseau concerné. -> Note: When using the interactive CLI, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. +> Note : Lorsque vous utilisez la CLI interactive, après avoir exécuté `graph init` avec succès, vous serez invité à ajouter une nouvelle `dataSource`. ### Récupération des ABIs Le(s) fichier(s) ABI doivent correspondre à votre(vos) contrat(s). Il existe plusieurs façons d'obtenir des fichiers ABI : - Si vous construisez votre propre projet, vous aurez probablement accès à vos ABI les plus récents. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. 
+- Si vous construisez un subgraph pour un projet public, vous pouvez télécharger ce projet sur votre ordinateur et obtenir l'ABI en utilisant [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) ou en utilisant `solc` pour compiler. +- Vous pouvez également trouver l'ABI sur [Etherscan](https://etherscan.io/), mais ce n'est pas toujours fiable, car l'ABI qui y est téléchargé peut être obsolète. Assurez-vous d'avoir le bon ABI, sinon l'exécution de votre subgraph échouera. ## Versions disponibles de SpecVersion -| Version | Notes de version | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Ajout de la prise en charge des gestionnaires d'événement ayant accès aux reçus de transactions. | -| 0.0.4 | Ajout de la prise en charge du management des fonctionnalités de subgraph. | +| Version | Notes de version | +| :-----: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 1.2.0 | Ajout de la prise en charge du [filtrage des arguments indexés](/#indexed-argument-filters--topic-filters) et de la déclaration `eth_call` | +| 1.1.0 | Prend en charge [Timeseries & Aggregations](#timeseries-and-aggregations). Ajout de la prise en charge du type `Int8` pour `id`. | +| 1.0.0 | Prend en charge la fonctionnalité [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) pour élaguer les subgraphs | +| 0.0.9 | Prend en charge la fonctionnalité `endBlock` | +| 0.0.8 | Ajout de la prise en charge des [gestionnaires de blocs](/developing/creating-a-subgraph/#polling-filter) et des [gestionnaires d'initialisation](/developing/creating-a-subgraph/#once-filter) d'interrogation. | +| 0.0.7 | Ajout de la prise en charge des [fichiers sources de données](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Prend en charge la variante de calcul rapide de la [Preuve d'indexation](/indexing/overview/#what-is-a-proof-of-indexing-poi). | +| 0.0.5 | Ajout de la prise en charge des gestionnaires d'événement ayant accès aux reçus de transactions. | +| 0.0.4 | Ajout de la prise en charge du management des fonctionnalités de subgraph. 
| From 7a56f58b8ac512e9182a9759657a8f4adbe90e9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:14 -0500 Subject: [PATCH 0828/1534] New translations install-the-cli.mdx (Spanish) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/es/subgraphs/developing/creating/install-the-cli.mdx index ef16d813c431..b605e2194260 100644 --- a/website/src/pages/es/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ Los archivos ABI deben coincidir con tu(s) contrato(s). Hay varias formas de obt ## SpecVersion Releases -| Version | Notas del lanzamiento | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Version | Notas del lanzamiento | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 2024525d00891395d6d28debbc4b6ad06eb95e4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:15 -0500 Subject: [PATCH 0829/1534] New translations install-the-cli.mdx (Arabic) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ar/subgraphs/developing/creating/install-the-cli.mdx index 76c1d923b417..ce78d54e83b6 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ## SpecVersion Releases -| الاصدار | ملاحظات الإصدار | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| الاصدار | ملاحظات الإصدار | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From f00c1c8805b20e1a8bad2e131a89f77ec6293c8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:16 -0500 Subject: [PATCH 0830/1534] New translations install-the-cli.mdx (Czech) --- .../developing/creating/install-the-cli.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/cs/subgraphs/developing/creating/install-the-cli.mdx index 913017bb3632..48d32263b2f7 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ Soubor(y) ABI se musí shodovat s vaší smlouvou. Soubory ABI lze získat něko ## SpecVersion Releases -| Verze | Poznámky vydání | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | +| Verze | Poznámky vydání | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | | 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 223c61182a23c0f1736f7953fe3aeba95a04345d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:17 -0500 Subject: [PATCH 0831/1534] New translations install-the-cli.mdx (German) --- .../developing/creating/install-the-cli.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/de/subgraphs/developing/creating/install-the-cli.mdx index fe4a2a2b0585..aae556a7c090 100644 --- a/website/src/pages/de/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/install-the-cli.mdx @@ -4,11 +4,11 @@ title: Install the Graph CLI > In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/subgraphs/developing/deploying/subgraph-studio-faq/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/resources/roles/curating/). -## Overview +## Überblick The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. -## Getting Started +## Erste Schritte ### Install the Graph CLI @@ -106,14 +106,14 @@ The ABI file(s) must match your contract(s). There are a few ways to obtain ABI ## SpecVersion Releases -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 8788c0389b41ebca7759005604616334240b1549 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:18 -0500 Subject: [PATCH 0832/1534] New translations install-the-cli.mdx (Italian) --- .../developing/creating/install-the-cli.mdx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/it/subgraphs/developing/creating/install-the-cli.mdx index da587ecf3f60..6a15118ec854 100644 --- a/website/src/pages/it/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/install-the-cli.mdx @@ -8,7 +8,7 @@ title: Installare the Graph CLI The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/subgraphs/developing/creating/subgraph-manifest/) and compiles the [mappings](/subgraphs/developing/creating/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. -## Getting Started +## Per cominciare ### Installare the Graph CLI @@ -106,14 +106,14 @@ I file ABI devono corrispondere al vostro contratto. Esistono diversi modi per o ## SpecVersion Releases -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Versione | Note di rilascio | +| :------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). 
Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From a1903d13832b28b247d833a123ac1dd302ceb211 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:19 -0500 Subject: [PATCH 0833/1534] New translations install-the-cli.mdx (Japanese) --- .../developing/creating/install-the-cli.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ja/subgraphs/developing/creating/install-the-cli.mdx index 593ec6d24b2d..342b849853e8 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ ABI ファイルは、契約内容と一致している必要があります。A ## SpecVersion Releases -| バージョン | リリースノート | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | +| バージョン | リリースノート | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | | 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 2d6edfba2c63828e56390f2965b59a61d5d054c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:20 -0500 Subject: [PATCH 0834/1534] New translations install-the-cli.mdx (Korean) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ko/subgraphs/developing/creating/install-the-cli.mdx index a58e6be82324..4fd6483f544b 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The ABI file(s) must match your contract(s). There are a few ways to obtain ABI ## SpecVersion Releases -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 8e283e59285e779d3c2d3663b1ebb94fbd2a799b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:21 -0500 Subject: [PATCH 0835/1534] New translations install-the-cli.mdx (Dutch) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/nl/subgraphs/developing/creating/install-the-cli.mdx index 87c33efe428e..29e224e65481 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The ABI file(s) must match your contract(s). There are a few ways to obtain ABI ## SpecVersion Releases -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 1a896d2ea6e278892220c6ff28e0bf08941c91e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:22 -0500 Subject: [PATCH 0836/1534] New translations install-the-cli.mdx (Polish) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/pl/subgraphs/developing/creating/install-the-cli.mdx index 72001c551547..4411e4801279 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The ABI file(s) must match your contract(s). There are a few ways to obtain ABI ## SpecVersion Releases -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From 80bf75b4cea4f30569107323e6a112aba485781d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:23 -0500 Subject: [PATCH 0837/1534] New translations install-the-cli.mdx (Portuguese) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/pt/subgraphs/developing/creating/install-the-cli.mdx index eda8ead451ae..738451f93b44 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ Os arquivos da ABI devem combinar com o(s) seu(s) contrato(s). Há algumas manei ## SpecVersion Releases -| Versão | Notas de atualização | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Adicionado apoio a handlers de eventos com acesso a recibos de transação. | -| 0.0.4 | Adicionado apoio à gestão de recursos de subgraph. | +| Versão | Notas de atualização | +| :----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Adicionado apoio a handlers de eventos com acesso a recibos de transação. | +| 0.0.4 | Adicionado apoio à gestão de recursos de subgraph. 
| From 031d82e32adec6c1b775e69f75eb169d716506d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:25 -0500 Subject: [PATCH 0838/1534] New translations install-the-cli.mdx (Russian) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ru/subgraphs/developing/creating/install-the-cli.mdx index bff4ca3d0b23..d796cfc9771a 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ## Релизы SpecVersion -| Версия | Примечания к релизу | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Добавлена поддержка обработчиков событий, имеющих доступ к чекам транзакций. | -| 0.0.4 | Добавлена ​​поддержка управления функциями субграфа. | +| Версия | Примечания к релизу | +| :----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Добавлена поддержка обработчиков событий, имеющих доступ к чекам транзакций. | +| 0.0.4 | Добавлена ​​поддержка управления функциями субграфа. 
| From bc6acea6a281320ddfec1d66cbaf3945a9540d73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:25 -0500 Subject: [PATCH 0839/1534] New translations install-the-cli.mdx (Swedish) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/sv/subgraphs/developing/creating/install-the-cli.mdx index 1e917f1a7cc8..fb162d4f4390 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ ABI-filerna måste matcha ditt/dina kontrakt. Det finns några olika sätt att f ## SpecVersion Releases -| Version | Versionsanteckningar | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Version | Versionsanteckningar | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. 
| From e5fb3b0f53402dffdda32f2d549931ccc5fcea3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:27 -0500 Subject: [PATCH 0840/1534] New translations install-the-cli.mdx (Turkish) --- .../developing/creating/install-the-cli.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/tr/subgraphs/developing/creating/install-the-cli.mdx index 6c69a1d90504..abda1be0f656 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ ABI dosya(lar)ı sözleşme(ler) inizle uygun olmalıdır. ABI dosyalarını edi ## SpecVersion Sürümleri -| Sürüm | Sürüm Notları | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | +| Sürüm | Sürüm Notları | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | | 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | İşleyicilerin işlem makbuzlarına erişim desteği eklendi. | -| 0.0.4 | Subgraph özelliklerini yönetme desteği eklendi. | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | İşleyicilerin işlem makbuzlarına erişim desteği eklendi. | +| 0.0.4 | Subgraph özelliklerini yönetme desteği eklendi. | From fc2a4301d20d9cd9f89ef1359243d0ac46cc417e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:28 -0500 Subject: [PATCH 0841/1534] New translations install-the-cli.mdx (Ukrainian) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/uk/subgraphs/developing/creating/install-the-cli.mdx index ea09d89ab610..72176bc552bf 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The ABI file(s) must match your contract(s). 
There are a few ways to obtain ABI ## SpecVersion Releases -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 35ea76152bf11bb9394583f311fcfc9663b41501 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:29 -0500 Subject: [PATCH 0842/1534] New translations install-the-cli.mdx (Chinese Simplified) --- .../developing/creating/install-the-cli.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/zh/subgraphs/developing/creating/install-the-cli.mdx index 62bb898478c0..577be7a5feeb 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ ABI 文件必须与您的合约相匹配。 获取 ABI 文件的方法有以下 ## SpecVersion Releases -| 版本 | Release 说明 | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | +| 版本 | Release 说明 | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | | 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From a79266bdddfc665726f425faddcd6aed2b56fdc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:30 -0500 Subject: [PATCH 0843/1534] New translations install-the-cli.mdx (Urdu (Pakistan)) --- .../developing/creating/install-the-cli.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/ur/subgraphs/developing/creating/install-the-cli.mdx index 70226de74267..d70293bb6e89 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ ABI فائل (فائلیں) آپ کے کنٹریکٹ (کنٹریکٹس) سے م ## SpecVersion Releases -| ورزن | جاری کردہ نوٹس | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | +| ورزن | جاری کردہ نوٹس | +| :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | | 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 028335e4c11fe22bd87cfbb60c0813d5a336632e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:31 -0500 Subject: [PATCH 0844/1534] New translations install-the-cli.mdx (Vietnamese) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/vi/subgraphs/developing/creating/install-the-cli.mdx index d79f714cf51a..a3ce4d94137f 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ## SpecVersion Releases -| Phiên bản | Ghi chú phát hành | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| Phiên bản | Ghi chú phát hành | +| :-------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 418039beba5fd5baa35549e7489a5958fdc8545c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:32 -0500 Subject: [PATCH 0845/1534] New translations install-the-cli.mdx (Marathi) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/mr/subgraphs/developing/creating/install-the-cli.mdx index 210e247b2270..421b1b1e2c1c 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The ABI file(s) must match your contract(s). There are a few ways to obtain ABI ## SpecVersion Releases -| आवृत्ती | रिलीझ नोट्स | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | +| आवृत्ती | रिलीझ नोट्स | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. 
| +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | From 80631e59d8365b45964e3c182cb99a233a507282 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:33 -0500 Subject: [PATCH 0846/1534] New translations install-the-cli.mdx (Hindi) --- .../developing/creating/install-the-cli.mdx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/install-the-cli.mdx b/website/src/pages/hi/subgraphs/developing/creating/install-the-cli.mdx index f147a244ebca..8a093b985cc3 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/install-the-cli.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/install-the-cli.mdx @@ -106,14 +106,14 @@ The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is ## स्पेकवर्जन रिलीज़ -| संस्करण | रिलीज नोट्स | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | घटना हैंडलरों को लेनदेन रसीदों तक पहुंच प्रदान करने के लिए समर्थन जोड़ा गया है। | -| 0.0.4 | घटना हैंडलरों को लेनदेन रसीदों तक पहुंच प्रदान करने के लिए समर्थन जोड़ा गया है। | +| Version | Release notes | +| :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/indexing/overview/#what-is-a-proof-of-indexing-poi) calculation variant. 
|
| 0.0.5 | घटना हैंडलरों को लेनदेन रसीदों तक पहुंच प्रदान करने के लिए समर्थन जोड़ा गया है। |
| 0.0.4 | सबग्राफ सुविधाओं के प्रबंधन के लिए समर्थन जोड़ा गया है। |

From 4136cea6225abcfed936c99c3f82b05fa6a1b2df Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:59:34 -0500
Subject: [PATCH 0847/1534] New translations ql-schema.mdx (Romanian)

---
 .../developing/creating/ql-schema.mdx         | 32 +++++++++++++------
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/website/src/pages/ro/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ro/subgraphs/developing/creating/ql-schema.mdx
index d148cf2ab1fb..4ecef1f613b3 100644
--- a/website/src/pages/ro/subgraphs/developing/creating/ql-schema.mdx
+++ b/website/src/pages/ro/subgraphs/developing/creating/ql-schema.mdx
@@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two

The following scalars are supported in the GraphQL API:

| Type         | Description                                                                                                                                                                                                        |
| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `Bytes`      | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses.                                                                                                                  |
| `String`     | Scalar for `string` values. Null characters are not supported and are automatically removed.                                                                                                                       |
| `Boolean`    | Scalar for `boolean` values.                                                                                                                                                                                       |
| `Int`        | The GraphQL spec defines `Int` to be a signed 32-bit integer.                                                                                                                                                      |
| `Int8`       | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum.   |
| `BigInt`     | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`.                               |
| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits.                                                    |
| `Timestamp`  | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations.
| ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Many-To-Many Relationships For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. From 48d34b4bb328c2ae9e82c46c2d80e6378dc7e915 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:35 -0500 Subject: [PATCH 0848/1534] New translations ql-schema.mdx (French) --- .../developing/creating/ql-schema.mdx | 126 ++++++++++-------- 1 file changed, 69 insertions(+), 57 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/fr/subgraphs/developing/creating/ql-schema.mdx index 168976049040..a8850ccaee9f 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/ql-schema.mdx @@ -1,28 +1,28 @@ --- -title: The Graph QL Schema +title: Schema The Graph QL --- ## Aperçu -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. +Le schéma de votre subgraph se trouve dans le fichier `schema.graphql`. Les schémas GraphQL sont définis à l'aide du langage de définition d'interface GraphQL. -> Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/subgraphs/querying/graphql-api/) section. +> Remarque : si vous n'avez jamais écrit de schéma GraphQL, il est recommandé de consulter ce guide sur le système de types GraphQL. La documentation de référence pour les schémas GraphQL est disponible dans la section [API GraphQL](/subgraphs/querying/graphql-api/). ### Définition des entités Avant de définir des entités, il est important de prendre du recul et de réfléchir à la manière dont vos données sont structurées et liées. -- All queries will be made against the data model defined in the subgraph schema. As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- Toutes les requêtes seront effectuées sur le modèle de données défini dans le schéma de subgraph. Par conséquent, la conception du schéma de subgraph doit être informée par les requêtes que votre application devra exécuter. - Il peut être utile d'imaginer les entités comme des "objets contenant des données", plutôt que comme des événements ou des fonctions. -- You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. -- Each type that should be an entity is required to be annotated with an `@entity` directive. 
+- Vous définissez les types d'entités dans `schema.graphql`, et Graph Node générera des champs de premier niveau pour interroger des instances uniques et des collections de ce type d'entité. +- Chaque type qui doit être une entité doit être annoté avec une directive `@entity`. - Par défaut, les entités sont mutables, ce qui signifie que les mappages peuvent charger des entités existantes, les modifier et stocker une nouvelle version de cette entité. - - Mutability comes at a price, so for entity types that will never be modified, such as those containing data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. + - La mutabilité a un prix, donc pour les types d'entités qui ne seront jamais modifiés, comme ceux contenant des données extraites textuellement de la blockchain, il est recommandé de les marquer comme immuables avec `@entity(immutable: true)`. - Si des changements se produisent dans le même bloc où l'entité a été créée, alors les mappages peuvent effectuer des changements sur les entités immuables. Les entités immuables sont beaucoup plus rapides à écrire et à interroger, donc elles devraient être utilisées chaque fois que c'est possible. #### Bon exemple -The following `Gravatar` entity is structured around a Gravatar object and is a good example of how an entity could be defined. +L'entité `Gravatar` suivante est structurée autour d'un objet Gravatar et constitue un bon exemple de la manière dont une entité pourrait être définie. ```graphql type Gravatar @entity(immutable: true) { @@ -36,7 +36,7 @@ type Gravatar @entity(immutable: true) { #### Mauvais exemple -The following example `GravatarAccepted` and `GravatarDeclined` entities are based around events. It is not recommended to map events or function calls to entities 1:1. +Les exemples d'entités `GravatarAccepted` et `GravatarDeclined` suivants sont basés sur des événements. Il n'est pas recommandé de mapper des événements ou des appels de fonction à des entités 1:1. ```graphql type GravatarAccepted @entity { @@ -56,15 +56,15 @@ type GravatarDeclined @entity { #### Champs facultatifs et obligatoires -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If the field is a scalar field, you get an error when you try to store the entity. If the field references another entity then you get this error: +Les champs d'entité peuvent être définis comme obligatoires ou facultatifs. Les champs obligatoires sont indiqués par le `!` dans le schéma. Si le champ est un champ scalaire, vous obtenez une erreur lorsque vous essayez de stocker l'entité. Si le champ fait référence à une autre entité, vous obtenez cette erreur : ``` Null value resolved for non-null field 'name' ``` -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. +Chaque entité doit avoir un champ `id`, qui doit être de type `Bytes!` ou `String!`. 
Il est généralement recommandé d'utiliser `Bytes!`, à moins que l'`id` ne contienne du texte lisible par l'homme, car les entités avec des identifiants `Bytes!` seront plus rapides à écrire et à interroger que celles avec un `id` `String!`. Le champ `id` sert de clé primaire et doit être unique parmi toutes les entités du même type. Pour des raisons historiques, le type `ID!` est également accepté et est un synonyme de `String!`. -For some entity types the `id` for `Bytes!` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id) ` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. +Pour certains types d'entités, l'`id` de `Bytes!` est construit à partir des id de deux autres entités ; cela est possible en utilisant `concat`, par exemple, `let id = left.id.concat(right.id) ` pour former l'id à partir des id de `left` et `right`. De même, pour construire un identifiant à partir de l'identifiant d'une entité existante et d'un compteur `count`, `let id = left.id.concatI32(count)` peut être utilisé. La concaténation est garantie pour produire des identifiants uniques tant que la longueur de `left` est la même pour toutes ces entités, par exemple, parce que `left.id` est une `Address`. ### Types scalaires intégrés @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two Les scalaires suivants sont supportés dans l'API GraphQL : -| Type | Description | -| --- | --- | -| `Bytes` | Tableau d'octets, représenté sous forme de chaîne hexadécimale. Couramment utilisé pour les hachages et adresses Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Tableau d'octets, représenté sous forme de chaîne hexadécimale. Couramment utilisé pour les hachages et adresses Ethereum. | +| `String` | Scalaire pour les valeurs de type `string`. Les caractères nuls ne sont pas pris en charge et sont automatiquement supprimés. | +| `Boolean` | Scalaire pour les valeurs de type `boolean` (booléennes). 
| +| `Int` | La spécification GraphQL définit `Int` comme un entier signé de 32 bits. | +| `Int8` | Un entier signé de 8 octets, également connu sous le nom d'entier signé de 64 bits, peut stocker des valeurs comprises entre -9 223 372 036 854 775 808 et 9 223 372 036 854 775 807. Il est préférable de l'utiliser pour représenter `i64` de l'ethereum. | +| `BigInt` | Grands entiers. Utilisé pour les types Ethereum `uint32`, `int64`, `uint64`, ..., `uint256`. Note : Tout ce qui est inférieur à `uint32`, comme `int32`, `uint24` ou `int8` est représenté par `i32`. | +| `BigDecimal` | `BigDecimal` Décimales de haute précision représentées par un significatif et un exposant. L'exposant est compris entre -6143 et +6144. Arrondi à 34 chiffres significatifs. | +| `Timestamp` | Il s'agit d'une valeur `i64` en microsecondes. Couramment utilisé pour les champs `timestamp` des séries chronologiques et des agrégations. | ### Enums @@ -95,9 +95,9 @@ enum TokenStatus { } ``` -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: +Une fois que l'enum est défini dans le schéma, vous pouvez utiliser la représentation en chaîne de caractère de la valeur de l'enum pour définir un champ de l'enum sur une entité. Par exemple, vous pouvez fixer le `tokenStatus` à `SecondOwner` en définissant d'abord votre entité et en fixant ensuite le champ avec `entity.tokenStatus = "SecondOwner"`. L'exemple ci-dessous montre à quoi ressemblerait l'entité Token avec un champ enum : -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). +Pour plus de détails sur l'écriture des énumérations, voir la [documentation GraphQL](https://graphql.org/learn/schema/). ### Relations entre entités @@ -107,7 +107,7 @@ Les relations sont définies sur les entités comme n'importe quel autre champ s #### Relations individuelles -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: +Définir un type d'entité `Transaction` avec une relation optionnelle de type un-à-un avec un type d'entité `TransactionReceipt` : ```graphql type Transaction @entity(immutable: true) { @@ -123,7 +123,7 @@ type TransactionReceipt @entity(immutable: true) { #### Relations un-à-plusieurs -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: +Définir un type d'entité `TokenBalance` avec une relation obligatoire de type un-à-plusieurs avec un type d'entité Token : ```graphql type Token @entity(immutable: true) { @@ -139,13 +139,13 @@ type TokenBalance @entity { ### Recherches inversées -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. +Les recherches inversées peuvent être définies sur une entité à travers le champ `@derivedFrom`. 
Cela crée un champ virtuel sur l'entité qui peut être interrogé mais qui ne peut pas être défini manuellement par l'intermédiaire de l'API des correspondances. Il est plutôt dérivé de la relation définie sur l'autre entité. Pour de telles relations, il est rarement utile de stocker les deux côtés de la relation, et l'indexation et les performances des requêtes seront meilleures si un seul côté est stocké et que l'autre est dérivé.

Pour les relations un-à-plusieurs, la relation doit toujours être stockée du côté « un » et le côté « plusieurs » doit toujours être dérivé. Stocker la relation de cette façon, plutôt que de stocker un tableau d'entités du côté « plusieurs », entraînera des performances considérablement meilleures pour l'indexation et l'interrogation du sous-graphe. En général, le stockage de tableaux d’entités doit être évité autant que possible.

#### Exemple

Nous pouvons rendre les soldes d'un token accessibles à partir du token en dérivant un champ `tokenBalances` :

```graphql
type Token @entity(immutable: true) {
  id: Bytes!
}

type TokenBalance @entity {
  id: Bytes!
  amount: Int!
  token: Token!
}
```

Here is an example of how to write a mapping for a subgraph with reverse lookups:

```typescript
let token = new Token(event.address) // Create Token
token.save() // tokenBalances is derived automatically

let tokenBalance = new TokenBalance(event.address)
tokenBalance.amount = BigInt.fromI32(0)
tokenBalance.token = token.id // Reference stored here
tokenBalance.save()
```

#### Relations plusieurs-à-plusieurs

Pour les relations plusieurs-à-plusieurs, telles que les utilisateurs pouvant appartenir à un nombre quelconque d'organisations, la manière la plus simple, mais généralement pas la plus performante, de modéliser la relation consiste à créer un tableau dans chacune des deux entités impliquées. Si la relation est symétrique, un seul côté de la relation doit être stocké et l’autre côté peut être dérivé.

#### Exemple

Définir une recherche inversée d'un type d'entité `User` vers un type d'entité `Organization`. Dans l'exemple ci-dessous, ceci est réalisé en recherchant l'attribut `members` à l'intérieur de l'entité `Organization`. Dans les requêtes, le champ `organizations` de `User` sera résolu en trouvant toutes les entités `Organization` qui incluent l'ID de l'utilisateur.
```graphql type Organization @entity { @@ -182,7 +194,7 @@ type User @entity { } ``` -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like +Une façon plus performante de stocker cette relation est d'utiliser une table de correspondance qui a une entrée pour chaque paire `User` / `Organization` avec un schéma tel que ```graphql type Organization @entity { @@ -210,7 +222,7 @@ Cette approche nécessite que les requêtes descendent vers un niveau supplémen query usersWithOrganizations { users { organizations { - # ceci est une entité UserOrganization + # ceci est une entité UserOrganization organization { name } @@ -223,7 +235,7 @@ Cette manière plus élaborée de stocker des relations plusieurs-à-plusieurs e ### Ajouter des commentaires au schéma -As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: +Conformément à la spécification GraphQL, des commentaires peuvent être ajoutés au-dessus des attributs des entités du schéma en utilisant le symbole dièse `#`. Ceci est illustré dans l'exemple ci-dessous : ```graphql type MyFirstEntity @entity { @@ -239,7 +251,7 @@ Les requêtes de recherche en texte intégral filtrent et classent les entités Une définition de requête en texte intégrale inclut le nom de la requête, le dictionnaire de langue utilisé pour traiter les champs de texte, l'algorithme de classement utilisé pour classer les résultats et les champs inclus dans la recherche. Chaque requête en texte intégral peut s'étendre sur plusieurs champs, mais tous les champs inclus doivent provenir d'un seul type d'entité. -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. +Pour ajouter une requête fulltext, incluez un type `_Schema_` avec une directive fulltext dans le schéma GraphQL. ```graphql type _Schema_ @@ -262,7 +274,7 @@ type Band @entity { } ``` -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/subgraphs/querying/graphql-api/#queries) for a description of the fulltext search API and more example usage. +Le champ d'exemple `bandSearch` peut être utilisé dans des requêtes pour filtrer les entités `Band` sur la base des documents texte dans les champs `name`, `description` et `bio`. Allez à [Requêtes - API GraphQL API ](/subgraphs/querying/graphql-api/#queries) pour une description de l'API de recherche plein texte et d'autres exemples d'utilisation. ```graphql query { @@ -275,7 +287,7 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Gestion des fonctionnalités](#experimental-features):** A partir de `specVersion` `0.0.4`, `fullTextSearch` doit être déclaré dans la section `features` du manifeste du subgraph. 
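Here is a minimal sketch of how a different dictionary and ranking algorithm from the tables below could be selected. It reuses the `bandSearch` definition shown above; only the `language` and `algorithm` values are swapped, and those two values are illustrative assumptions rather than part of the original example:

```graphql
type _Schema_
  @fulltext(
    name: "bandSearch"
    language: fr
    algorithm: proximityRank
    include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }]
  )
```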
## Langues prises en charge @@ -283,30 +295,30 @@ Le choix d'une langue différente aura un effet définitif, bien que parfois sub Dictionnaires de langues pris en charge : -| Code | Dictionnaire | -| ------ | ------------ | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portugais | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | Dictionnaire | +| ------ | ---------------- | +| simple | Général | +| da | Danois | +| nl | Néerlandais | +| en | Anglais | +| fi | Finlandais | +| fr | Français | +| de | Allemand | +| hu | Hongrois | +| it | Italien | +| no | Norvégien | +| pt | Portugais | +| ro | Roumain | +| ru | Russe | +| es | Espagnol | +| sv | Suédois | +| tr | Turc | ### Algorithmes de classement Algorithmes de classement: -| Algorithm | Description | -| --- | --- | -| rank | Utilisez la qualité de correspondance (0-1) de la requête en texte intégral pour trier les résultats. | -| proximitéRang | Similar to rank but also includes the proximity of the matches. | +| Algorithme | Description | +| -------------- | ----------------------------------------------------------------------------------------------------- | +| rank | Utilisez la qualité de correspondance (0-1) de la requête en texte intégral pour trier les résultats. | +| proximitéRang | Similaire au classement, mais inclut également la proximité des correspondances. | From 4136cea6225abcfed936c99c3f82b05fa6a1b2df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:36 -0500 Subject: [PATCH 0849/1534] New translations ql-schema.mdx (Spanish) --- .../developing/creating/ql-schema.mdx | 40 ++++++++++++------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/es/subgraphs/developing/creating/ql-schema.mdx index cb79b75fa584..eba35650c4ec 100644 --- a/website/src/pages/es/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Tipo | Descripción | -| --- | --- | -| `Bytes` | Byte array, representado como un string hexadecimal. Comúnmente utilizado para los hashes y direcciones de Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| +| Tipo | Descripción | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, representado como un string hexadecimal. Comúnmente utilizado para los hashes y direcciones de Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Relaciones de many-to-many Para las relaciones de many-to-many, como los usuarios pueden pertenecer a cualquier número de organizaciones, la forma más directa, pero generalmente no la más eficaz, de modelar la relación es en un array en cada una de las dos entidades implicadas. Si la relación es simétrica, sólo es necesario almacenar un lado de la relación y el otro puede derivarse. @@ -306,7 +318,7 @@ Diccionarios de idiomas admitidos: Algoritmos admitidos para ordenar los resultados: -| Algorithm | Description | -| --- | --- | -| rank | Usa la calidad de coincidencia (0-1) de la consulta de texto completo para ordenar los resultados. | -| rango de proximidad | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------------- | -------------------------------------------------------------------------------------------------- | +| rank | Usa la calidad de coincidencia (0-1) de la consulta de texto completo para ordenar los resultados. | +| rango de proximidad | Similar to rank but also includes the proximity of the matches. 
| From 8758c222149664eb5820e2ffe89e6c1113dd0d74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:37 -0500 Subject: [PATCH 0850/1534] New translations ql-schema.mdx (Arabic) --- .../developing/creating/ql-schema.mdx | 44 ++++++++++++------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ar/subgraphs/developing/creating/ql-schema.mdx index cc8e54d39e64..2098109cb101 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| النوع | الوصف | -| --- | --- | -| `Bytes` | مصفوفة Byte ، ممثلة كسلسلة سداسية عشرية. يشيع استخدامها في Ethereum hashes وعناوينه. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| النوع | الوصف | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | مصفوفة Byte ، ممثلة كسلسلة سداسية عشرية. يشيع استخدامها في Ethereum hashes وعناوينه. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -143,7 +143,7 @@ Reverse lookups can be defined on an entity through the `@derivedFrom` field. Th For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. 
Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. -#### مثال +#### Example We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: @@ -160,11 +160,23 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### علاقات متعدد_لمتعدد For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. -#### مثال +#### Example Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. @@ -306,7 +318,7 @@ Supported language dictionaries: Supported algorithms for ordering results: -| Algorithm | Description | -| ------------- | --------------------------------------------------------------- | -| rank | استخدم جودة مطابقة استعلام النص-الكامل (0-1) لترتيب النتائج. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | استخدم جودة مطابقة استعلام النص-الكامل (0-1) لترتيب النتائج. | +| proximityRank | Similar to rank but also includes the proximity of the matches. | From 4c63f7bb0259bef7dc937d5ee79765eb9152ae98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:38 -0500 Subject: [PATCH 0851/1534] New translations ql-schema.mdx (Czech) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/cs/subgraphs/developing/creating/ql-schema.mdx index ae9d13f0f547..9ad84ea2f335 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Typ | Popis | -| --- | --- | -| `Bytes` | Pole bajtů reprezentované jako hexadecimální řetězec. Běžně se používá pro hashe a adresy Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Typ | Popis | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Pole bajtů reprezentované jako hexadecimální řetězec. Běžně se používá pro hashe a adresy Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Vztahy mnoho k mnoha Pro vztahy mnoho-více, jako jsou uživatelé, z nichž každý může patřit do libovolného počtu organizací, je nejjednodušší, ale obecně ne nejvýkonnější, modelovat vztah jako pole v každé z obou zúčastněných entit. Pokud je vztah symetrický, je třeba uložit pouze jednu stranu vztahu a druhou stranu lze odvodit. 
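For reference, here is a minimal GraphQL sketch of the array-plus-derivation pattern described in the paragraph above, reusing the `User`/`Organization` example that appears elsewhere in this guide; only one side of the relationship (`members`) is stored, and `organizations` is derived from it:

```graphql
type Organization @entity {
  id: Bytes!
  name: String!
  members: [User!]!
}

type User @entity {
  id: Bytes!
  name: String!
  organizations: [Organization!]! @derivedFrom(field: "members")
}
```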
From 373b85597338878e9abbd31fc6845ee76331846c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:39 -0500 Subject: [PATCH 0852/1534] New translations ql-schema.mdx (German) --- .../developing/creating/ql-schema.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/de/subgraphs/developing/creating/ql-schema.mdx index 27562f970620..369ccdd4119a 100644 --- a/website/src/pages/de/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/ql-schema.mdx @@ -2,7 +2,7 @@ title: The Graph QL Schema --- -## Overview +## Überblick The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Beschreibung | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| ### Enums @@ -143,7 +143,7 @@ Reverse lookups can be defined on an entity through the `@derivedFrom` field. Th For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. -#### Example +#### Beispiel We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: @@ -176,7 +176,7 @@ tokenBalance.save() For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. -#### Example +#### Beispiel Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. From 3d70893e142ee2e88bc3e6f6d31ce91cf2db2965 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:40 -0500 Subject: [PATCH 0853/1534] New translations ql-schema.mdx (Italian) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/it/subgraphs/developing/creating/ql-schema.mdx index 77d065c06881..05ab1fe4c590 100644 --- a/website/src/pages/it/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Tipo | Descrizione | -| --- | --- | -| `Bytes` | Byte array, rappresentato come una stringa esadecimale. Comunemente utilizzato per gli hash e gli indirizzi di Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| +| Tipo | Descrizione | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, rappresentato come una stringa esadecimale. Comunemente utilizzato per gli hash e gli indirizzi di Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enum @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Relazioni molti-a-molti Per le relazioni molti-a-molti, come ad esempio gli utenti che possono appartenere a un numero qualsiasi di organizzazioni, il modo più semplice, ma generalmente non il più performante, di modellare la relazione è come un array in ciascuna delle due entità coinvolte. Se la relazione è simmetrica, è necessario memorizzare solo un lato della relazione e l'altro lato può essere derivato. From c875888ee282533764c7dd02d40561382ec086bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:41 -0500 Subject: [PATCH 0854/1534] New translations ql-schema.mdx (Japanese) --- .../developing/creating/ql-schema.mdx | 74 +++++++++++-------- 1 file changed, 43 insertions(+), 31 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ja/subgraphs/developing/creating/ql-schema.mdx index eaa78acfc174..0df29684fe76 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| タイプ | 説明書き | -| --- | --- | -| `Bytes` | Byte 配列で、16 進数の文字列で表されます。Ethereum のハッシュやアドレスによく使われます。 | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. 
Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| タイプ | 説明書き | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte 配列で、16 進数の文字列で表されます。Ethereum のハッシュやアドレスによく使われます。 | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### 多対多のリレーションシップ ユーザーがそれぞれ任意の数の組織に所属しているような多対多の関係の場合、関係をモデル化する最も簡単な方法は、関係する 2 つのエンティティのそれぞれに配列として格納することですが、一般的には最もパフォーマンスの高い方法ではありません。対称的な関係であれば、関係の片側のみを保存する必要があり、もう片側は派生させることができます。 @@ -284,29 +296,29 @@ query { サポートされている言語の辞書: | Code | 辞書 | -| ------ | ------------ | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | ポルトガル語 | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | ポルトガル語 | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### ランキングアルゴリズム サポートされている結果の順序付けのアルゴリズム: -| Algorithm | Description | -| ------------- | ------------------------------------------------------------------- | -| rank | フルテキストクエリのマッチ品質 (0-1) を使用して結果を並べ替えます。 | -| proximityRank | Similar to rank but also includes the proximity of the matches. 
| +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | フルテキストクエリのマッチ品質 (0-1) を使用して結果を並べ替えます。 | +| proximityRank | Similar to rank but also includes the proximity of the matches. | From c4aa06e7e750a2a15e66f686fbd7a8fbb5752bfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:42 -0500 Subject: [PATCH 0855/1534] New translations ql-schema.mdx (Korean) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ko/subgraphs/developing/creating/ql-schema.mdx index d148cf2ab1fb..4ecef1f613b3 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Many-To-Many Relationships For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. From 6c134545144dc9dfd83550cfa380f8005df89994 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:43 -0500 Subject: [PATCH 0856/1534] New translations ql-schema.mdx (Dutch) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/nl/subgraphs/developing/creating/ql-schema.mdx index d148cf2ab1fb..4ecef1f613b3 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. 
Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Many-To-Many Relationships For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. From 7cdbc1ff4195e1f231403e87936c18c1f7d33401 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:44 -0500 Subject: [PATCH 0857/1534] New translations ql-schema.mdx (Polish) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/pl/subgraphs/developing/creating/ql-schema.mdx index d148cf2ab1fb..4ecef1f613b3 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Many-To-Many Relationships For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. From 4e37c17ea475053cdb98e1b73e1acb163643910d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:45 -0500 Subject: [PATCH 0858/1534] New translations ql-schema.mdx (Portuguese) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/pt/subgraphs/developing/creating/ql-schema.mdx index 5a2888544ea1..6bb4c8e21c9b 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Tipo | Descrição | -| --- | --- | -| `Bytes` | Arranjo de bytes, representado como string hexadecimal. Usado frequentemente por hashes e endereços no Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Tipo | Descrição | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Arranjo de bytes, representado como string hexadecimal. Usado frequentemente por hashes e endereços no Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Relacionamentos Vários-com-Vários Para relacionamentos vários-com-vários, como um conjunto de utilizadores em que cada um pertence a qualquer número de organizações, o relacionamento é mais simplesmente — mas não mais eficientemente — modelado como um arranjo em cada uma das duas entidades envolvidas. Se o relacionamento for simétrico, apenas um lado do relacionamento precisa ser armazenado, e o outro lado pode ser derivado. 
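A note on maintaining the array side of such a many-to-many relationship in a mapping. The sketch below is illustrative rather than taken from these files: it assumes a `User` entity whose `organizations` field holds `Organization` IDs as strings, and a hypothetical `MemberAdded(user, organization)` event from a data source named `Registry`.

```typescript
import { MemberAdded } from '../generated/Registry/Registry' // hypothetical event and data source
import { User } from '../generated/schema'

export function handleMemberAdded(event: MemberAdded): void {
  let id = event.params.user.toHexString()
  let user = User.load(id)
  if (user == null) {
    user = new User(id)
    user.organizations = []
  }
  // Entity array fields are returned by value in AssemblyScript mappings,
  // so copy the array, push onto the copy, then reassign it
  let orgs = user.organizations
  orgs.push(event.params.organization.toHexString())
  user.organizations = orgs
  user.save()
}
```

The derived side of the relationship then costs nothing to maintain, since it is resolved at query time.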
From e2740b2810447c9140a65369b0ef6806195839d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:46 -0500 Subject: [PATCH 0859/1534] New translations ql-schema.mdx (Russian) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ru/subgraphs/developing/creating/ql-schema.mdx index 9c7498da0fbb..fbf13c8c1415 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two В API GraphQL поддерживаются следующие скаляры: -| Тип | Описание | -| --- | --- | -| `Bytes` | Массив байтов, представленный в виде шестнадцатеричной строки. Обычно используется для хэшей и адресов Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Тип | Описание | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Массив байтов, представленный в виде шестнадцатеричной строки. Обычно используется для хэшей и адресов Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| ### Перечисления @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Связи "Многие ко многим" Для связей "многие ко многим", таких, например, как пользователи, каждый из которых может принадлежать к любому числу организаций, наиболее простым, но, как правило, не самым производительным способом моделирования связей является создание массива в каждом из двух задействованных объектов. Если связь симметрична, то необходимо сохранить только одну сторону связи, а другая сторона может быть выведена. From e64fdcc395ffcc43999cb8833f119e9459e992cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:47 -0500 Subject: [PATCH 0860/1534] New translations ql-schema.mdx (Swedish) --- .../developing/creating/ql-schema.mdx | 75 ++++++++++++------- 1 file changed, 46 insertions(+), 29 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/sv/subgraphs/developing/creating/ql-schema.mdx index bdfb8a1357a5..0a4b2f7a4193 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Typ | Beskrivning | -| --- | --- | -| `Bytes` | Bytematris, representerad som en hexadecimal sträng. Vanligt används för Ethereum-hashar och adresser. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Typ | Beskrivning | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Bytematris, representerad som en hexadecimal sträng. Vanligt används för Ethereum-hashar och adresser. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Många-till-många-relationer För många-till-många-relationer, som till exempel användare som var och en kan tillhöra ett antal organisationer, är det mest raka, men generellt sett inte den mest prestanda-optimerade, sättet att modellera relationen som en array i vardera av de två entiteter som är involverade. Om relationen är symmetrisk behöver bara ena sidan av relationen lagras och den andra sidan kan härledas. @@ -247,7 +259,12 @@ type _Schema_ name: "bandSearch" language: en algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] + include: [ + { + entity: "Band" + fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] + } + ] ) type Band @entity { @@ -283,24 +300,24 @@ Att välja ett annat språk kommer att ha en definitiv, om än ibland subtil, ef Stödda språkordböcker: -| Code | Ordbok | -| ----- | ------------ | -| enkel | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portugisiska | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | Ordbok | +| ------ | ------------ | +| enkel | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | Portugisiska | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### Rankningsalgoritmer From 685e0101cb23bfe9a4a58c697709eeaca07c7ea4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:48 -0500 Subject: [PATCH 0861/1534] New translations ql-schema.mdx (Turkish) --- .../developing/creating/ql-schema.mdx | 68 +++++++++++-------- 1 file changed, 40 insertions(+), 28 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/tr/subgraphs/developing/creating/ql-schema.mdx index 6016e3cbfe03..97d3b85af90f 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two GraphQL API'sinde desteklenen 
skalarlardan bazıları şunlardır: -| Tür | Tanım | -| --- | --- | -| `Bytes` | Byte dizisi, onaltılık bir dizgi olarak temsil edilir. Ethereum hash değerleri ve adresleri için yaygın olarak kullanılır. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Tür | Tanım | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte dizisi, onaltılık bir dizgi olarak temsil edilir. Ethereum hash değerleri ve adresleri için yaygın olarak kullanılır. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Numaralandırmalar @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Çoktan Çoğa İlişkiler Kullanıcıların her birinin birden çok kuruluşa mensup olabileceği gibi çoktan çoğa ilişkilerde, ilişkiyi modellemenin en basit fakat pek verimli olmayan yolu, ilişkide yer alan iki varlıkta da bir dizi olarak saklamaktır. İlişki simetrik ise, ilişkinin yalnızca bir tarafının saklanması gerekir ve diğer taraf türetilebilir. 
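Once a fulltext field such as the `bandSearch` example shown earlier is defined, it is exposed as a top-level query field. A minimal query sketch; the `text` argument accepts tsquery-style operators such as `&` (and), `|` (or) and `<->` (followed by):

```graphql
{
  bandSearch(text: "heavy & metal") {
    id
    name
    description
  }
}
```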
@@ -283,24 +295,24 @@ Farklı bir dil seçmek, tam metin arama API'sı üzerinde bazen az olsa da kesi Desteklenen dil sözlükleri: -| Code | Sözlük | -| ----- | ---------- | -| yalın | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portekizce | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | Sözlük | +| ------ | ---------- | +| yalın | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | Portekizce | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### Algoritmaları Sıralama From 282b1f4e0953fb2eea5dfb5ad674ac3a78087829 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:49 -0500 Subject: [PATCH 0862/1534] New translations ql-schema.mdx (Ukrainian) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/uk/subgraphs/developing/creating/ql-schema.mdx index d148cf2ab1fb..4ecef1f613b3 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| Type | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. 
Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Many-To-Many Relationships For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. From d378c8ba6416597cdd7c8740495540e11f94f9be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:50 -0500 Subject: [PATCH 0863/1534] New translations ql-schema.mdx (Chinese Simplified) --- .../developing/creating/ql-schema.mdx | 76 +++++++++++-------- 1 file changed, 44 insertions(+), 32 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/zh/subgraphs/developing/creating/ql-schema.mdx index 224a3fb0aa34..e72f7995eb42 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| 类型 | 描述 | -| --- | --- | -| `Bytes` | 字节数组,表示为十六进制字符串。 通常用于以太坊hash和地址。 | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| +| 类型 | 描述 | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | 字节数组,表示为十六进制字符串。 通常用于以太坊hash和地址。 | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### 枚举类型 @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### 多对多关系 对于多对多关系,例如每个可能属于任意数量的组织的用户,对关系建模的最直接,但通常不是最高效的方法,是在所涉及的两个实体中的每一个中定义数组。 如果关系是对称的,则只需要存储关系的一侧联系,就可以导出另一侧。 @@ -283,30 +295,30 @@ query { 支持的语言词典: -| Code | 词典 | -| ------ | --------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | 葡萄牙语 | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Code | 词典 | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | 葡萄牙语 | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | ### 排序算法 支持的排序结果算法: -| Algorithm | Description | -| ------------- | --------------------------------------------------------------- | -| rank | 使用全文查询的匹配质量 (0-1) 对结果进行排序。 | -| proximityRank | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | 使用全文查询的匹配质量 (0-1) 对结果进行排序。 | +| proximityRank | Similar to rank but also includes the proximity of the matches. 
| From 4e4d186ebc78bed1f10eec47fe37bbdc38d98fec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:51 -0500 Subject: [PATCH 0864/1534] New translations ql-schema.mdx (Urdu (Pakistan)) --- .../developing/creating/ql-schema.mdx | 40 ++++++++++++------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/ur/subgraphs/developing/creating/ql-schema.mdx index c17ec92f7266..7a5155c51cba 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| قسم | تفصیل | -| --- | --- | -| `Bytes` | Byte array، ایک ہیکساڈیسیمل سٹرنگ کے طور پر پیش کیا جاتا ہے. عام طور پر Ethereum hashes اور ایڈریسیس کے لیے استعمال ہوتا ہے. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| قسم | تفصیل | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array، ایک ہیکساڈیسیمل سٹرنگ کے طور پر پیش کیا جاتا ہے. عام طور پر Ethereum hashes اور ایڈریسیس کے لیے استعمال ہوتا ہے. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### ون-ٹو-مینی تعلقات مینی-ٹو-مینی تعلقات کے لیے، جیسے کہ صارفین جن میں سے ہر ایک کا تعلق کسی بھی تعداد میں تنظیموں سے ہو سکتا ہے، سب سے سیدھا، لیکن عام طور پر سب سے زیادہ پرفارمنس نہیں، تعلق کو ماڈل کرنے کا طریقہ شامل دونوں ہستیوں میں سے ہر ایک میں ایک ایرے کے طور پر ہے۔ اگر تعلق ہم آہنگ ہے تو، رشتے کے صرف ایک رخ کو ذخیرہ کرنے کی ضرورت ہے اور دوسری طرف اخذ کیا جا سکتا ہے. @@ -306,7 +318,7 @@ query { نتائج ترتیب دینے کے لیے معاون الگورتھم: -| Algorithm | Description | -| ------------ | --------------------------------------------------------------------------- | -| rank | نتائج ترتیب دینے کے لیے فل ٹیکسٹ کیوری کے میچ کوالٹی (1-0) کا استعمال کریں. | -| قربت کا درجہ | Similar to rank but also includes the proximity of the matches. | +| Algorithm | Description | +| ------------- | --------------------------------------------------------------------------- | +| rank | نتائج ترتیب دینے کے لیے فل ٹیکسٹ کیوری کے میچ کوالٹی (1-0) کا استعمال کریں. | +| قربت کا درجہ | Similar to rank but also includes the proximity of the matches. | From 710a9faeeb28b81990571c4be1945e2135fbf305 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:52 -0500 Subject: [PATCH 0865/1534] New translations ql-schema.mdx (Vietnamese) --- .../developing/creating/ql-schema.mdx | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/vi/subgraphs/developing/creating/ql-schema.mdx index a2d88ceca9e5..094667a8a1c4 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| Loại | Miêu tả | -| --- | --- | -| `Bytes` | Mảng byte, được biểu diễn dưới dạng chuỗi thập lục phân. Thường được sử dụng cho các mã băm và địa chỉ Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| +| Loại | Miêu tả | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Mảng byte, được biểu diễn dưới dạng chuỗi thập lục phân. Thường được sử dụng cho các mã băm và địa chỉ Ethereum. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### Enums @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### Mối quan hệ nhiều-nhiều For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. From a99e994faba1ced52b487bae1b9ce8c1e826f706 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:53 -0500 Subject: [PATCH 0866/1534] New translations ql-schema.mdx (Marathi) --- .../developing/creating/ql-schema.mdx | 76 +++++++++++-------- 1 file changed, 44 insertions(+), 32 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/mr/subgraphs/developing/creating/ql-schema.mdx index 1328e880b019..c150fd46a8b7 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/ql-schema.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/ql-schema.mdx @@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two The following scalars are supported in the GraphQL API: -| प्रकार | वर्णन | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. 
Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | +| प्रकार | वर्णन | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | ### एनम्स @@ -160,6 +160,18 @@ type TokenBalance @entity { } ``` +Here is an example of how to write a mapping for a subgraph with reverse lookups: + +```typescript +let token = new Token(event.address) // Create Token +token.save() // tokenBalances is derived automatically + +let tokenBalance = new TokenBalance(event.address) +tokenBalance.amount = BigInt.fromI32(0) +tokenBalance.token = token.id // Reference stored here +tokenBalance.save() +``` + #### अनेक-ते-अनेक संबंध अनेक-ते-अनेक नातेसंबंधांसाठी, जसे की वापरकर्ते जे कोणत्याही संस्थेशी संबंधित असू शकतात, सर्वात सरळ, परंतु सामान्यतः सर्वात कार्यक्षम नसतात, संबंध मॉडेल करण्याचा मार्ग समाविष्ट असलेल्या दोन घटकांपैकी प्रत्येकामध्ये एक अॅरे आहे. नातेसंबंध सममितीय असल्यास, नातेसंबंधाची फक्त एक बाजू संग्रहित करणे आवश्यक आहे आणि दुसरी बाजू मिळवता येते. 
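The reverse-lookup mapping shown in this patch pairs naturally with a query against the derived field. A sketch, assuming the `Token`/`TokenBalance` schema from the example above; `tokenBalances` is resolved at query time from the `token` references stored on each balance:

```graphql
{
  tokens(first: 10) {
    id
    tokenBalances {
      id
      amount
    }
  }
}
```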
@@ -283,30 +295,30 @@ query {

समर्थित भाषा शब्दकोश:

-| Code | शब्दकोश |
-| ---- | --------- |
-| सोपे | General |
-| da | Danish |
-| nl | Dutch |
-| en | English |
-| fi | Finnish |
-| fr | French |
-| de | German |
-| hu | Hungarian |
-| it | Italian |
-| no | Norwegian |
-| pt | पोर्तुगीज |
-| ro | Romanian |
-| ru | Russian |
-| es | Spanish |
-| sv | Swedish |
-| tr | Turkish |
+| Code | शब्दकोश |
+| ------ | ---------- |
+| सोपे | General |
+| da | Danish |
+| nl | Dutch |
+| en | English |
+| fi | Finnish |
+| fr | French |
+| de | German |
+| hu | Hungarian |
+| it | Italian |
+| no | Norwegian |
+| pt | पोर्तुगीज |
+| ro | Romanian |
+| ru | Russian |
+| es | Spanish |
+| sv | Swedish |
+| tr | Turkish |

### रँकिंग अल्गोरिदम

परिणाम ऑर्डर करण्यासाठी समर्थित अल्गोरिदम:

-| Algorithm | Description |
-| ------------- | ---------------------------------------------------------------------- |
-| rank | निकाल ऑर्डर करण्यासाठी फुलटेक्स्ट क्वेरीची जुळणी गुणवत्ता (0-1) वापरा. |
-| proximityRank | Similar to rank but also includes the proximity of the matches. |
+| Algorithm | Description |
+| ------------- | ----------------------------------------------------------------------- |
+| rank | निकाल ऑर्डर करण्यासाठी फुलटेक्स्ट क्वेरीची जुळणी गुणवत्ता (0-1) वापरा. |
+| proximityRank | Similar to rank but also includes the proximity of the matches. |

From fcb0e6b8cfdfb57b95b5115a28b863fa0bc1a4b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:59:54 -0500
Subject: [PATCH 0867/1534] New translations ql-schema.mdx (Hindi)

---
 .../developing/creating/ql-schema.mdx         | 68 +++++++++++--------
 1 file changed, 40 insertions(+), 28 deletions(-)

diff --git a/website/src/pages/hi/subgraphs/developing/creating/ql-schema.mdx b/website/src/pages/hi/subgraphs/developing/creating/ql-schema.mdx
index df1edc8c2c7c..e32b10927b2f 100644
--- a/website/src/pages/hi/subgraphs/developing/creating/ql-schema.mdx
+++ b/website/src/pages/hi/subgraphs/developing/creating/ql-schema.mdx
@@ -72,16 +72,16 @@ For some entity types the `id` for `Bytes!` is constructed from the id's of two

नीचे दिए गए स्केलर्स GraphQL API में समर्थित हैं:

-| प्रकार | विवरण |
-| --- | --- |
-| `Bytes` | बाइट सरणी, एक हेक्साडेसिमल स्ट्रिंग के रूप में दर्शाया गया है। आमतौर पर एथेरियम हैश और पतों के लिए उपयोग किया जाता है। |
-| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. |
-| `Boolean` | Scalar for `boolean` values. |
-| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. |
-| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. |
-| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. |
-| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. |
-| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. |
+| प्रकार | Description |
+| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `Bytes` | बाइट सरणी, एक हेक्साडेसिमल स्ट्रिंग के रूप में दर्शाया गया है। आमतौर पर एथेरियम हैश और पतों के लिए उपयोग किया जाता है। |
+| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. |
+| `Boolean` | Scalar for `boolean` values. |
+| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. |
+| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. |
+| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. |
+| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. |
+| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. |

### Enums

@@ -160,6 +160,18 @@ type TokenBalance @entity {
 }
 ```

+Here is an example of how to write a mapping for a subgraph with reverse lookups:
+
+```typescript
+let token = new Token(event.address) // Create Token
+token.save() // tokenBalances is derived automatically
+
+let tokenBalance = new TokenBalance(event.address)
+tokenBalance.amount = BigInt.fromI32(0)
+tokenBalance.token = token.id // Reference stored here
+tokenBalance.save()
+```
+
#### अनेक-से-अनेक संबंध

मैनी-टू-मैनी संबंधों के लिए, जैसे कि प्रत्येक उपयोगकर्ता किसी भी संख्या में संगठनों से संबंधित हो सकता है, सबसे सरल, लेकिन आम तौर पर सबसे अधिक प्रदर्शनकारी नहीं, संबंध को मॉडल करने का तरीका शामिल दो संस्थाओं में से प्रत्येक में एक सरणी के रूप में है। यदि संबंध सममित है, तो संबंध के केवल एक पक्ष को संग्रहित करने की आवश्यकता है और दूसरे पक्ष को व्युत्पन्न किया जा सकता है।

@@ -283,24 +295,24 @@ query {

समर्थित भाषा शब्दकोश:

-| Code | शब्दकोष |
-| ------ | --------- |
-| simple | General |
-| da | Danish |
-| nl | Dutch |
-| en | English |
-| fi | Finnish |
-| fr | French |
-| de | German |
-| hu | Hungarian |
-| it | Italian |
-| no | Norwegian |
-| pt | पुर्तगाली |
-| ro | Romanian |
-| ru | Russian |
-| es | Spanish |
-| sv | Swedish |
-| tr | Turkish |
+| Code | शब्दकोष |
+| ------ | ---------- |
+| simple | General |
+| da | Danish |
+| nl | Dutch |
+| en | English |
+| fi | Finnish |
+| fr | French |
+| de | German |
+| hu | Hungarian |
+| it | Italian |
+| no | Norwegian |
+| pt | पुर्तगाली |
+| ro | Romanian |
+| ru | Russian |
+| es | Spanish |
+| sv | Swedish |
+| tr | Turkish |

### रैंकिंग एल्गोरिदम

From d378c8ba6416597cdd7c8740495540e11f94f9be Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 12:59:56 -0500
Subject: [PATCH 0868/1534] New translations subgraph-manifest.mdx (Romanian)

---
 .../ro/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx
index b4c0e467c780..a42a50973690 100644
--- a/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx
+++ b/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx
@@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of

### Data Source for the Main Contract

-First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract.
+First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract.

```yaml
dataSources:
a/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ro/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Data Source for the Main Contract -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From ed7b0eabfc5638337a737215c30f7eedcb4f34e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:57 -0500 Subject: [PATCH 0869/1534] New translations subgraph-manifest.mdx (French) --- .../developing/creating/subgraph-manifest.mdx | 134 +++++++++--------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/fr/subgraphs/developing/creating/subgraph-manifest.mdx index f7f957f5741a..f3b29bd0de75 100644 --- a/website/src/pages/fr/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/fr/subgraphs/developing/creating/subgraph-manifest.mdx @@ -1,20 +1,20 @@ --- -title: Subgraph Manifest +title: Manifeste de Subgraph --- ## Aperçu -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. +Le manifeste du subgraph, `subgraph.yaml`, définit les contrats intelligents et le réseau que votre subgraph va indexer, les événements de ces contrats auxquels il faut prêter attention, et comment faire correspondre les données d'événements aux entités que Graph Node stocke et permet d'interroger. -The **subgraph definition** consists of the following files: +La **définition du subgraph** se compose des fichiers suivants : -- `subgraph.yaml`: Contains the subgraph manifest +- `subgraph.yaml` : Contient le manifeste du subgraph -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL +- `schema.graphql` : Un schéma GraphQL définissant les données stockées pour votre subgraph et comment les interroger via GraphQL -- `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) +- `mapping.ts` : [Mappage AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code qui traduit les données d'événements en entités définies dans votre schéma (par exemple `mapping.ts` dans ce guide) -### Subgraph Capabilities +### Capacités des subgraphs Un seul subgraph peut : @@ -22,11 +22,11 @@ Un seul subgraph peut : - Indexer des données de fichiers IPFS en utilisant des File Data Sources. -- Add an entry for each contract that requires indexing to the `dataSources` array. 
+- Ajouter une entrée pour chaque contrat nécessitant une indexation dans le tableau `dataSources`. -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +La spécification complète des manifestes de subgraphs est disponible [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph listed above, `subgraph.yaml` is: +Pour l'exemple de subgraph cité ci-dessus, `subgraph.yaml` est : ```yaml version spec : 0.0.4 @@ -77,41 +77,41 @@ les sources de données: fichier : ./src/mapping.ts ``` -## Subgraph Entries +## Entrées de subgraphs -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/subgraphs/developing/creating/ql-schema/). +> Remarque importante : veillez à remplir le manifeste de votre subgraph avec tous les gestionnaires et [entités](/subgraphs/developing/creating/ql-schema/). Les entrées importantes à mettre à jour pour le manifeste sont : -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. +- `specVersion` : une version de semver qui identifie la structure du manifeste et les fonctionnalités supportées pour le subgraph. La dernière version est `1.2.0`. Voir la section [versions de specVersion](#specversion-releases) pour plus de détails sur les fonctionnalités et les versions. -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. +- `description` : une description lisible par l'homme de ce qu'est le subgraph. Cette description est affichée dans Graph Explorer lorsque le subgraph est déployé dans Subgraph Studio. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. +- `repository` : l'URL du dépôt où le manifeste du subgraph peut être trouvé. Cette URL est également affichée dans Graph Explorer. -- `features`: a list of all used [feature](#experimental-features) names. +- `features` : une liste de tous les noms de [fonctionnalités](#experimental-features) utilisés. -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. +- `indexerHints.prune` : Définit la conservation des données de blocs historiques pour un subgraph. Voir [prune](#prune) dans la section [indexerHints](#indexer-hints). -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source` : l'adresse du contrat intelligent dont le subgraph est issu, et l'ABI du contrat intelligent à utiliser. L'adresse est optionnelle ; l'omettre permet d'indexer les événements correspondants de tous les contrats. -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.startBlock` : le numéro optionnel du bloc à partir duquel la source de données commence l'indexation. 
Dans la plupart des cas, nous suggérons d'utiliser le bloc dans lequel le contrat a été créé. -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. +- `dataSources.source.endBlock` : Le numéro optionnel du bloc sur lequel la source de données arrête l'indexation, y compris ce bloc. Version minimale de la spécification requise : `0.0.9`. -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. +- `dataSources.context` : paires clé-valeur qui peuvent être utilisées dans les mappages de subgraphs. Supporte différents types de données comme `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, et `BigInt`. Chaque variable doit spécifier son `type` et ses `données`. Ces variables de contexte sont ensuite accessibles dans les fichiers de mappage, offrant plus d'options configurables pour le développement de subgraphs. -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. +- `dataSources.mapping.entities` : les entités que la source de données écrit dans le store. Le schéma de chaque entité est défini dans le fichier schema.graphql. -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. +- `dataSources.mapping.abis` : un ou plusieurs fichiers ABI nommés pour le contrat source ainsi que pour tous les autres contrats intelligents avec lesquels vous interagissez à partir des mappages. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers` : liste les événements du contrat intelligent auxquels ce subgraph réagit et les gestionnaires dans le mappage - ./src/mapping.ts dans l'exemple - qui transforment ces événements en entités dans le store. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers` : liste les fonctions de contrat intelligent auxquelles ce subgraph réagit et les handlers dans le mappage qui transforment les entrées et sorties des appels de fonction en entités dans le store. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.blockHandlers` : liste les blocs auxquels ce subgraph réagit et les gestionnaires du mappage à exécuter lorsqu'un bloc est ajouté à la blockchain. Sans filtre, le gestionnaire de bloc sera exécuté à chaque bloc. 
Un filtre d'appel optionnel peut être fourni en ajoutant un champ `filter` avec `kind : call` au gestionnaire. Ceci ne lancera le gestionnaire que si le bloc contient au moins un appel au contrat de la source de données. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +Un seul subgraph peut indexer des données provenant de plusieurs contrats intelligents. Ajoutez une entrée pour chaque contrat dont les données doivent être indexées dans le tableau `dataSources`. ## Gestionnaires d'événements @@ -149,15 +149,15 @@ dataSources: ## Gestionnaires d'appels -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +Si les événements constituent un moyen efficace de collecter les modifications pertinentes de l'état d'un contrat, de nombreux contrats évitent de générer des logs afin d'optimiser les coûts de gaz. Dans ce cas, un subgraph peut s'abonner aux appels faits au contrat de source de données. Pour ce faire, il suffit de définir des gestionnaires d'appels faisant référence à la signature de la fonction et au gestionnaire de mappage qui traitera les appels à cette fonction. Pour traiter ces appels, le gestionnaire de mappage recevra un `ethereum.Call` comme argument avec les entrées et sorties typées de l'appel. Les appels effectués à n'importe quel niveau de la blockchain d'appels d'une transaction déclencheront le mappage, ce qui permettra de capturer l'activité avec le contrat de source de données par le biais de contrats proxy. Les gestionnaires d'appels ne se déclencheront que dans l'un des deux cas suivants : lorsque la fonction spécifiée est appelée par un compte autre que le contrat lui-même ou lorsqu'elle est marquée comme externe dans Solidity et appelée dans le cadre d'une autre fonction du même contrat. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Note:** Les gestionnaires d'appels dépendent actuellement de l'API de traçage de Parité. Certains réseaux, tels que BNB chain et Arbitrum, ne supportent pas cette API. Si un subgraph indexant l'un de ces réseaux contient un ou plusieurs gestionnaires d'appels, il ne commencera pas à se synchroniser. Les développeurs de subgraphs devraient plutôt utiliser des gestionnaires d'événements. Ceux-ci sont bien plus performants que les gestionnaires d'appels et sont pris en charge par tous les réseaux evm. 
### Définir un gestionnaire d'appels -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. +Pour définir un gestionnaire d'appel dans votre manifeste, ajoutez simplement un tableau `callHandlers` sous la source de données à laquelle vous souhaitez vous abonner. ```yaml dataSources: @@ -182,11 +182,11 @@ dataSources: handler: handleCreateGravatar ``` -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. +La propriété `function` est la signature de la fonction normalisée pour filtrer les appels. La propriété `handler` est le nom de la fonction dans votre mappage que vous souhaitez exécuter lorsque la fonction cible est appelée dans le contrat de la source de données. ### Fonction de cartographie -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Chaque gestionnaire d'appel prend un seul paramètre qui a un type correspondant au nom de la fonction appelée. Dans l'exemple du subgraph ci-dessus, le mapping contient un gestionnaire d'appel lorsque la fonction `createGravatar` est appelée et reçoit un paramètre `CreateGravatarCall` en tant qu'argument : ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -201,7 +201,7 @@ export function handleCreateGravatar(call: CreateGravatarCall): void { } ``` -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. +La fonction `handleCreateGravatar` prend un nouveau `CreateGravatarCall` qui est une sous-classe de `ethereum.Call`, fournie par `@graphprotocol/graph-ts`, qui inclut les entrées et sorties typées de l'appel. Le type `CreateGravatarCall` est généré pour vous lorsque vous lancez `graph codegen`. ## Block Handlers @@ -216,9 +216,9 @@ filter: kind: call ``` -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ +_Le gestionnaire défini sera appelé une fois pour chaque bloc qui contient un appel au contrat (source de données) sous lequel le gestionnaire est défini._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Note:** Le filtre `call` dépend actuellement de l'API de traçage de Parité. Certains réseaux, tels que BNB chain et Arbitrum, ne supportent pas cette API. Si un subgraph indexant un de ces réseaux contient un ou plusieurs gestionnaire de bloc avec un filtre `call`, il ne commencera pas à se synchroniser. L'absence de filtre pour un gestionnaire de bloc garantira que le gestionnaire est appelé à chaque bloc. Une source de données ne peut contenir qu'un seul gestionnaire de bloc pour chaque type de filtre. 
@@ -249,9 +249,9 @@ dataSources: #### Filtre d'interrogation -> **Requires `specVersion` >= 0.0.8** +> **Nécessite `specVersion` >= 0.0.8** > -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. +> **Note:** Les filtres d'interrogation ne sont disponibles que sur les dataSources de `kind : ethereum`. ```yaml blockHandlers: @@ -261,13 +261,13 @@ blockHandlers: every: 10 ``` -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. +Le gestionnaire défini sera appelé une fois tous les `n` blocs, où `n` est la valeur fournie dans le champ `every`. Cette configuration permet au subgraph d'effectuer des opérations spécifiques à intervalles réguliers. #### Le filtre Once -> **Requires `specVersion` >= 0.0.8** +> **Nécessite `specVersion` >= 0.0.8** > -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. +> **Note:** Les filtres Once ne sont disponibles que sur les dataSources de `kind : ethereum`. ```yaml blockHandlers: @@ -288,7 +288,7 @@ export function handleOnce(block: ethereum.Block): void { ### Fonction de cartographie -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +La fonction de mappage recevra une `ethereum.Block` comme seul argument. Comme les fonctions de mappage pour les événements, cette fonction peut accéder aux entités de subgraphs existantes dans le store, appeler des contrats intelligents et créer ou mettre à jour des entités. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -311,13 +311,13 @@ eventHandlers: handler: handleGive ``` -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. +Un événement ne sera déclenché que si la signature et le sujet 0 correspondent. Par défaut, `topic0` est égal au hash de la signature de l'événement. ## Reçus de transaction dans les gestionnaires d'événements -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. +A partir de `specVersion` `0.0.5` et `apiVersion` `0.0.7`, les gestionnaires d'événements peuvent avoir accès au reçu de la transaction qui les a émis. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +Pour ce faire, les gestionnaires d'événements doivent être déclarés dans le manifeste du subgraph avec la nouvelle clé `receipt : true`, qui est facultative et prend par défaut la valeur false. ```yaml eventHandlers: @@ -326,7 +326,7 @@ eventHandlers: receipt: true ``` -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. +Dans la fonction handler, le reçu peut être accédé dans le champ `Event.receipt`. Si la clé `receipt` est fixée à `false` ou omise dans le manifeste, une valeur `null` sera renvoyée à la place. ## Ordre de déclenchement des gestionnaires @@ -338,17 +338,17 @@ Les déclencheurs d'une source de données au sein d'un bloc sont classés à l' Ces règles de commande sont susceptibles de changer. 
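To illustrate the `receipt: true` option from the event-handler hunk above, here is a minimal sketch of reading the receipt inside a handler. The `Transfer` event mirrors the manifest snippet shown earlier; the generated import path is an assumption.

```typescript
import { log } from '@graphprotocol/graph-ts'
import { Transfer } from '../generated/Token/Token' // hypothetical import path

export function handleTransfer(event: Transfer): void {
  // `event.receipt` is null when the manifest omits `receipt: true`
  let receipt = event.receipt
  if (receipt !== null) {
    log.info('Transfer transaction emitted {} logs', [receipt.logs.length.toString()])
  }
}
```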
-> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. +> **Note:** Lorsque de nouvelles [sources de données dynamiques] (#data-source-templates-for-dynamically-created-contracts) sont créées, les gestionnaires définis pour les sources de données dynamiques ne commenceront à être traités qu'une fois que tous les gestionnaires de sources de données existants auront été traités, et se répéteront dans la même séquence chaque fois qu'ils seront déclenchés. ## Modèles de sources de données Un modèle courant dans les contrats intelligents compatibles EVM est l'utilisation de contrats de registre ou d'usine, dans lesquels un contrat crée, gère ou référence un nombre arbitraire d'autres contrats qui ont chacun leur propre état et leurs propres événements. -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. +Les adresses de ces sous-contrats peuvent ou non être connues à l'avance et nombre de ces contrats peuvent être créés et/ou ajoutés au fil du temps. C'est pourquoi, dans de tels cas, la définition d'une source de données unique ou d'un nombre fixe de sources de données est impossible et une approche plus dynamique est nécessaire : les _modèles de sources de données_. ### Source de données pour le contrat principal -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +Tout d'abord, vous devez définir une source de données régulière pour le contrat principal. L'extrait ci-dessous montre un exemple simplifié de source de données pour le contrat factory de l'exchange [Uniswap](https://uniswap.org). Notez le gestionnaire d'événement `NewExchange(address,address)`. Il est émis lorsqu'un nouveau contrat d'exchange est créé onchain par le contrat factory. ```yaml dataSources: @@ -375,7 +375,7 @@ dataSources: ### Modèles de source de données pour les contrats créés dynamiquement -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. +Ensuite, vous ajoutez des _modèles de sources de données_ au manifeste. Ceux-ci sont identiques aux sources de données classiques, sauf qu'ils n'ont pas d'adresse de contrat prédéfinie sous `source`. Typiquement, vous devriez définir un modèle pour chaque type de sous-contrat géré ou référencé par le contrat parent. ```yaml dataSources: @@ -411,7 +411,7 @@ templates: ### Instanciation d'un modèle de source de données -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. 
In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. +Dans la dernière étape, vous mettez à jour votre mappage du contrat principal pour créer une instance de source de données dynamique à partir de l'un des modèles. Dans cet exemple, vous modifiez le mappage du contrat principal pour importer le modèle `Exchange` et appeler la méthode `Exchange.create(address)` pour commencer à indexer le nouveau contrat d'exchange. ```typescript import { Exchange } from '../generated/templates' @@ -423,13 +423,13 @@ export function handleNewExchange(event: NewExchange): void { } ``` -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. +> **Note:** Une nouvelle source de données ne traitera que les appels et les événements du bloc dans lequel elle a été créée et de tous les blocs suivants, mais ne traitera pas les données historiques, c'est-à-dire les données contenues dans les blocs précédents. > > Si les blocs précédents contiennent des données pertinentes pour la nouvelle source de données, il est préférable d'indexer ces données en lisant l'état actuel du contrat et en créant des entités représentant cet état au moment de la création de la nouvelle source de données. ### Data Source Context -Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: +Les contextes de source de données permettent de passer une configuration supplémentaire lors de l'instanciation d'un modèle. Dans notre exemple, disons que les éxchanges sont associés à une paire de trading particulière, qui est incluse dans l'événement `NewExchange`. Cette information peut être passée dans la source de données instanciée, comme suit : ```typescript import { Exchange } from '../generated/templates' @@ -441,7 +441,7 @@ export function handleNewExchange(event: NewExchange): void { } ``` -Inside a mapping of the `Exchange` template, the context can then be accessed: +A l'intérieur d'un mappage du modèle `Exchange`, il est possible d'accéder au contexte : ```typescript import { dataSource } from '@graphprotocol/graph-ts' @@ -450,11 +450,11 @@ let context = dataSource.context() let tradingPair = context.getString('tradingPair') ``` -There are setters and getters like `setString` and `getString` for all value types. +Il existe des setters et getters comme `setString` et `getString` pour tous les types de valeurs. ## Blocs de démarrage -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +Le `startBlock` est un paramètre optionnel qui vous permet de définir à partir de quel bloc de la chaîne la source de données commencera l'indexation. Définir le bloc de départ permet à la source de données de sauter potentiellement des millions de blocs qui ne sont pas pertinents. 
En règle générale, un développeur de subgraphs définira `startBlock` au bloc dans lequel le contrat intelligent de la source de données a été créé. ```yaml dataSources: @@ -480,24 +480,24 @@ dataSources: handler: handleNewEvent ``` -> **Note:** The contract creation block can be quickly looked up on Etherscan: +> **Note:** Le bloc de création du contrat peut être consulté rapidement sur Etherscan : > > 1. Recherchez le contrat en saisissant son adresse dans la barre de recherche. -> 2. Click on the creation transaction hash in the `Contract Creator` section. +> 2. Cliquez sur le hash de la transaction de création dans la section `Contract Creator`. > 3. Chargez la page des détails de la transaction où vous trouverez le bloc de départ de ce contrat. ## Conseils pour l'indexeur -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. +Le paramètre `indexerHints` dans le manifeste d'un subgraph fournit des directives aux Indexeurs sur le traitement et la gestion d'un subgraph. Il influence les décisions opérationnelles concernant le traitement des données, les stratégies d'indexation et les optimisations. Actuellement, il propose l'option `prune` pour gérer la rétention ou suppression des données historiques. -> This feature is available from `specVersion: 1.0.0` +> Cette fonctionnalité est disponible à partir de `specVersion : 1.0.0` ### Prune -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: +`indexerHints.prune` : Définit la rétention des données de blocs historiques pour un subgraph. Les options sont les suivantes : -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. +1. `"never"`: Aucune suppression des données historiques ; conserve l'ensemble de l'historique. +2. `"auto"`: Conserve l'historique minimum nécessaire tel que défini par l'Indexeur, optimisant ainsi les performances de la requête. 3. Un nombre spécifique : Fixe une limite personnalisée au nombre de blocs historiques à conserver. ``` @@ -509,21 +509,21 @@ The `indexerHints` setting in a subgraph's manifest provides directives for inde L'historique à partir d'un bloc donné est requis pour : -- [Time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block +- Les [requêtes chronologiques](/subgraphs/querying/graphql-api/#time-travel-queries), qui permettent d'interroger les états passés de ces entités à des moments précis de l'histoire du subgraph +- Utilisation du subgraph comme [base de greffage](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) dans un autre subgraph, à ce bloc - Rembobiner le subgraph jusqu'à ce bloc Si les données historiques à partir du bloc ont été purgées, les capacités ci-dessus ne seront pas disponibles. -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. 
+> L'utilisation de `"auto"` est généralement recommandée car elle maximise les performances des requêtes et est suffisante pour la plupart des utilisateurs qui n'ont pas besoin d'accéder à des données historiques étendues. -For subgraphs leveraging [time travel queries](/subgraphs/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: +Pour les subgraphs exploitant les [requêtes chronologiques](/subgraphs/querying/graphql-api/#time-travel-queries), il est conseillé de définir un nombre spécifique de blocs pour la conservation des données historiques ou d'utiliser `prune: never` pour conserver tous les états d'entité historiques. Vous trouverez ci-dessous des exemples de configuration des deux options dans les paramètres de votre subgraphs : Pour conserver une quantité spécifique de données historiques : ``` indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain + prune: 1000 # Remplacez 1000 par le nombre de blocs souhaité à conserver ``` Préserver l'histoire complète des États de l'entité : From 6df92e2cb3e775a12bad661659f2818aece82e24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:58 -0500 Subject: [PATCH 0870/1534] New translations subgraph-manifest.mdx (Spanish) --- .../es/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/es/subgraphs/developing/creating/subgraph-manifest.mdx index 2f8be52bbabf..c825906fef29 100644 --- a/website/src/pages/es/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/es/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Fuente de Datos para el Contrato Principal -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From 5caadbf6e649ccba9f4d9ffd3e79605428c0a23d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 12:59:59 -0500 Subject: [PATCH 0871/1534] New translations subgraph-manifest.mdx (Arabic) --- .../ar/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ar/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ar/subgraphs/developing/creating/subgraph-manifest.mdx index 16d13269fb85..ba893838ca4e 100644 --- a/website/src/pages/ar/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ar/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### مصدر البيانات للعقد الرئيسي -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From b2634f2febab8447722a5e1bd63304c9fd31ade9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:00 -0500 Subject: [PATCH 0872/1534] New translations subgraph-manifest.mdx (Czech) --- .../cs/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/cs/subgraphs/developing/creating/subgraph-manifest.mdx index 705df5167126..a434110b4282 100644 --- a/website/src/pages/cs/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/cs/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Zdroj dat pro hlavní smlouvu -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From 3f104e87f1d1d4e34fa4bf056390cf6f676b5d8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:01 -0500 Subject: [PATCH 0873/1534] New translations subgraph-manifest.mdx (German) --- .../de/subgraphs/developing/creating/subgraph-manifest.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/de/subgraphs/developing/creating/subgraph-manifest.mdx index 925c81a8b98b..a3959f1f4d57 100644 --- a/website/src/pages/de/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/de/subgraphs/developing/creating/subgraph-manifest.mdx @@ -2,7 +2,7 @@ title: Subgraph Manifest --- -## Overview +## Überblick Das Subgraph-Manifest, `subgraph.yaml`, definiert die Smart Contracts und das Netzwerk, die Ihr Subgraph indizieren wird, die Ereignisse aus diesen Verträgen, auf die geachtet werden soll, und wie die Ereignisdaten auf Entitäten abgebildet werden, die Graph Node speichert und abfragen kann. @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Data Source for the Main Contract -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From 81f76ee87fb34cc4638740465d5fb935c152089f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:03 -0500 Subject: [PATCH 0874/1534] New translations subgraph-manifest.mdx (Italian) --- .../it/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/it/subgraphs/developing/creating/subgraph-manifest.mdx index 559f67852565..d8b9c415b293 100644 --- a/website/src/pages/it/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/it/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Data Source per il contratto principale -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From c5df87d974374eb6e8625ca25fe8887d5001a87f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:10 -0500 Subject: [PATCH 0875/1534] New translations subgraph-manifest.mdx (Japanese) --- .../ja/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ja/subgraphs/developing/creating/subgraph-manifest.mdx index 31095cac4850..1fc82b54930d 100644 --- a/website/src/pages/ja/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ja/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### メインコントラクトのデータソース -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From babff8dc0a6db9e0543f7865e6cfdeb977af966b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:13 -0500 Subject: [PATCH 0876/1534] New translations subgraph-manifest.mdx (Korean) --- .../ko/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ko/subgraphs/developing/creating/subgraph-manifest.mdx index b4c0e467c780..a42a50973690 100644 --- a/website/src/pages/ko/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ko/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Data Source for the Main Contract -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From 69f3b76f49858b90948229e7a876053f12ee0f53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:15 -0500 Subject: [PATCH 0877/1534] New translations subgraph-manifest.mdx (Dutch) --- .../nl/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/nl/subgraphs/developing/creating/subgraph-manifest.mdx index b4c0e467c780..a42a50973690 100644 --- a/website/src/pages/nl/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/nl/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Data Source for the Main Contract -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From 47e9f6d0bab6b110c4540610bfca0738c3bb69e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:16 -0500 Subject: [PATCH 0878/1534] New translations subgraph-manifest.mdx (Polish) --- .../pl/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/pl/subgraphs/developing/creating/subgraph-manifest.mdx index b4c0e467c780..a42a50973690 100644 --- a/website/src/pages/pl/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/pl/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Data Source for the Main Contract -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From 0b7cfc859789fa6c81a56ec4603af731c56d4b88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:17 -0500 Subject: [PATCH 0879/1534] New translations subgraph-manifest.mdx (Portuguese) --- .../pt/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/pt/subgraphs/developing/creating/subgraph-manifest.mdx index fdd7f1cba98e..2a4c3af44fe4 100644 --- a/website/src/pages/pt/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/pt/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Fonte de Dados para o Contrato Principal -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From 905155d626a757bf2e04f37d893b846618a54eda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:18 -0500 Subject: [PATCH 0880/1534] New translations subgraph-manifest.mdx (Russian) --- .../ru/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ru/subgraphs/developing/creating/subgraph-manifest.mdx index 7945d01c1c11..a8f1a728f47a 100644 --- a/website/src/pages/ru/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ru/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Источник данных для основного контракта -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From 2726e8cf77461fc59c274fb2619c6cd9f3a5cdac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:20 -0500 Subject: [PATCH 0881/1534] New translations subgraph-manifest.mdx (Swedish) --- .../developing/creating/subgraph-manifest.mdx | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/sv/subgraphs/developing/creating/subgraph-manifest.mdx index 7a8e3a77765f..4111e4def981 100644 --- a/website/src/pages/sv/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/sv/subgraphs/developing/creating/subgraph-manifest.mdx @@ -165,7 +165,7 @@ dataSources: name: Gravity network: mainnet source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + address: "0x731a10897d267e19b34503ad902d0a29173ba4b1" abi: Gravity mapping: kind: ethereum/events @@ -189,15 +189,15 @@ The `function` is the normalized function signature to filter calls by. The `han Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: ```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' +import { CreateGravatarCall } from "../generated/Gravity/Gravity"; +import { Transaction } from "../generated/schema"; export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() + let id = call.transaction.hash; + let transaction = new Transaction(id); + transaction.displayName = call.inputs._displayName; + transaction.imageUrl = call.inputs._imageUrl; + transaction.save(); } ``` @@ -228,7 +228,7 @@ dataSources: name: Gravity network: dev source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + address: "0x731a10897d267e19b34503ad902d0a29173ba4b1" abi: Gravity mapping: kind: ethereum/events @@ -280,9 +280,9 @@ Den definierade hanteraren med filtret once kommer att anropas endast en gång i ```ts export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() + let data = new InitialData(Bytes.fromUTF8("initial")); + data.data = "Setup data here"; + data.save(); } ``` @@ -291,12 +291,12 @@ export function handleOnce(block: ethereum.Block): void { The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. ```typescript -import { ethereum } from '@graphprotocol/graph-ts' +import { ethereum } from "@graphprotocol/graph-ts"; export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() + let id = block.hash; + let entity = new Block(id); + entity.save(); } ``` @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Datakälla för huvudkontraktet -First, you define a regular data source for the main contract. 
The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: @@ -356,7 +356,7 @@ dataSources: name: Factory network: mainnet source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + address: "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95" abi: Factory mapping: kind: ethereum/events @@ -414,12 +414,12 @@ templates: In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. ```typescript -import { Exchange } from '../generated/templates' +import { Exchange } from "../generated/templates"; export function handleNewExchange(event: NewExchange): void { // Start indexing the exchange; `event.params.exchange` is the // address of the new exchange contract - Exchange.create(event.params.exchange) + Exchange.create(event.params.exchange); } ``` @@ -432,22 +432,22 @@ export function handleNewExchange(event: NewExchange): void { Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: ```typescript -import { Exchange } from '../generated/templates' +import { Exchange } from "../generated/templates"; export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) + let context = new DataSourceContext(); + context.setString("tradingPair", event.params.tradingPair); + Exchange.createWithContext(event.params.exchange, context); } ``` Inside a mapping of the `Exchange` template, the context can then be accessed: ```typescript -import { dataSource } from '@graphprotocol/graph-ts' +import { dataSource } from "@graphprotocol/graph-ts"; -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') +let context = dataSource.context(); +let tradingPair = context.getString("tradingPair") ``` There are setters and getters like `setString` and `getString` for all value types. 
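As a complement to the `setString`/`getString` pair shown above, a short sketch using another value type. It assumes that `setBigInt`/`getBigInt` follow the same naming pattern as the string accessors, and the `NewExchange` import path is illustrative:

```typescript
import { DataSourceContext } from "@graphprotocol/graph-ts";
import { NewExchange } from "../generated/Factory/Factory"; // hypothetical path
import { Exchange } from "../generated/templates";

export function handleNewExchange(event: NewExchange): void {
  let context = new DataSourceContext();
  // assumed accessor; mirrors context.setString("tradingPair", ...)
  context.setBigInt("creationBlock", event.block.number);
  Exchange.createWithContext(event.params.exchange, context);
}
```

Inside a mapping of the `Exchange` template, `dataSource.context().getBigInt("creationBlock")` would then read the value back, mirroring the string example above.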
@@ -462,7 +462,7 @@ dataSources: name: ExampleSource network: mainnet source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + address: "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95" abi: ExampleContract startBlock: 6627917 mapping: From 369f4788e6590d778fbced04fceea2f7556bc965 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:21 -0500 Subject: [PATCH 0882/1534] New translations subgraph-manifest.mdx (Turkish) --- .../tr/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/tr/subgraphs/developing/creating/subgraph-manifest.mdx index 2853b063257d..88693e796ef6 100644 --- a/website/src/pages/tr/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/tr/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Ana Sözleşme için Veri Kaynağı -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From 87f9959ead7786b79efee86455c8d4d5f29529fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:22 -0500 Subject: [PATCH 0883/1534] New translations subgraph-manifest.mdx (Ukrainian) --- .../uk/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/uk/subgraphs/developing/creating/subgraph-manifest.mdx index b4c0e467c780..a42a50973690 100644 --- a/website/src/pages/uk/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/uk/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Data Source for the Main Contract -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From 691724519d3ce847d9faaf09cdf1c8bc4d9dcc64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:23 -0500 Subject: [PATCH 0884/1534] New translations subgraph-manifest.mdx (Chinese Simplified) --- .../zh/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/zh/subgraphs/developing/creating/subgraph-manifest.mdx index 9e2913c97e75..486f06a4c248 100644 --- a/website/src/pages/zh/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/zh/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### 主合约的数据源 -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From 885a414c3b5249b6fde2078a7d504c9097a19469 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:24 -0500 Subject: [PATCH 0885/1534] New translations subgraph-manifest.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/ur/subgraphs/developing/creating/subgraph-manifest.mdx index 106d985092ad..de8a303b302d 100644 --- a/website/src/pages/ur/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/ur/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### مرکزی کنٹریکٹ کے لیے ڈیٹا سورس -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From aba2efbbe816d509e7a3b65676ef4c3fe92ea8e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:25 -0500 Subject: [PATCH 0886/1534] New translations subgraph-manifest.mdx (Vietnamese) --- .../vi/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/vi/subgraphs/developing/creating/subgraph-manifest.mdx index cd250d296ca8..01ca69dbcd4b 100644 --- a/website/src/pages/vi/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/vi/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### Nguồn Dữ liệu cho Hợp đồng Chính -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From a67a7a63f61c5cbb3af347ed0bd36d6d64208304 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:26 -0500 Subject: [PATCH 0887/1534] New translations subgraph-manifest.mdx (Marathi) --- .../mr/subgraphs/developing/creating/subgraph-manifest.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/mr/subgraphs/developing/creating/subgraph-manifest.mdx index 74f2af02b12e..a09668000af7 100644 --- a/website/src/pages/mr/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/mr/subgraphs/developing/creating/subgraph-manifest.mdx @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### मुख्य करारासाठी डेटा स्रोत -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. 
```yaml dataSources: From 43dd384ee93cef46007f838d9de134056d76c100 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:27 -0500 Subject: [PATCH 0888/1534] New translations subgraph-manifest.mdx (Hindi) --- .../hi/subgraphs/developing/creating/subgraph-manifest.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/creating/subgraph-manifest.mdx b/website/src/pages/hi/subgraphs/developing/creating/subgraph-manifest.mdx index 0ed327ae8b8f..9315b0d0b218 100644 --- a/website/src/pages/hi/subgraphs/developing/creating/subgraph-manifest.mdx +++ b/website/src/pages/hi/subgraphs/developing/creating/subgraph-manifest.mdx @@ -20,7 +20,7 @@ subgraph मैनिफेस्ट, subgraph.yaml, उन स्मार् - कई स्मार्ट कॉन्ट्रैक्ट्स से डेटा को इंडेक्स करें (लेकिन कई नेटवर्क नहीं)। -- IPFS फ़ाइलों से डेटा को डेटा स्रोत फ़ाइलें का उपयोग करके अनुक्रमित करें। +- IPFS फ़ाइलों से डेटा को डेटा स्रोत फ़ाइलें का उपयोग करके अनुक्रमित करें। - Add an entry for each contract that requires indexing to the `dataSources` array. @@ -348,7 +348,7 @@ The addresses of these sub-contracts may or may not be known upfront and many of ### मुख्य अनुबंध के लिए डेटा स्रोत -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created onchain by the factory contract. ```yaml dataSources: From 32b19fd25a0b5c465b226b77f1379fc5774c746d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:29 -0500 Subject: [PATCH 0889/1534] New translations multiple-networks.mdx (French) --- .../fr/subgraphs/developing/deploying/multiple-networks.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/fr/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/fr/subgraphs/developing/deploying/multiple-networks.mdx index 26e6edf71235..a72771045069 100644 --- a/website/src/pages/fr/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/fr/subgraphs/developing/deploying/multiple-networks.mdx @@ -2,7 +2,7 @@ title: Déploiement d'un subgraph sur plusieurs réseaux --- -Cette page explique comment déployer un subgraph sur plusieurs réseaux. Pour déployer un subgraph, vous devez premièrement installer le [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). Si vous n'avez pas encore créé de subgraph, consultez [Creation d'un subgraph](/developing/creating-a-subgraph/). +Cette page explique comment déployer un subgraph sur plusieurs réseaux. Pour déployer un subgraph, vous devez d'abord installer [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). Si vous n'avez pas encore créé de subgraph, consultez [Créer un subgraph](/developing/creating-a-subgraph/). 
## Déploiement du subgraph sur plusieurs réseaux From f4e87821814dad96e7ef681f3876ebc1bbbe9f81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:32 -0500 Subject: [PATCH 0890/1534] New translations multiple-networks.mdx (German) --- .../deploying/multiple-networks.mdx | 96 +++++++++---------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/de/subgraphs/developing/deploying/multiple-networks.mdx index 4f7dcd3864e8..7bc4c42301c5 100644 --- a/website/src/pages/de/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/de/subgraphs/developing/deploying/multiple-networks.mdx @@ -1,37 +1,37 @@ --- -title: Deploying a Subgraph to Multiple Networks +title: Bereitstellen eines Subgraphen in mehreren Netzen --- -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). +Auf dieser Seite wird erklärt, wie man einen Subgraphen in mehreren Netzwerken bereitstellt. Um einen Subgraphen bereitzustellen, müssen Sie zunächst die [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) installieren. Wenn Sie noch keinen Subgraphen erstellt haben, lesen Sie [Erstellen eines Subgraphen](/developing/creating-a-subgraph/). -## Deploying the subgraph to multiple networks +## Breitstellen des Subgraphen in mehreren Netzen -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. +In manchen Fällen möchten Sie denselben Subgraph in mehreren Netzen bereitstellen, ohne den gesamten Code zu duplizieren. Die größte Herausforderung dabei ist, dass die Vertragsadressen in diesen Netzen unterschiedlich sind. -### Using `graph-cli` +### Verwendung von `graph-cli` -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: +Sowohl `graph build` (seit `v0.29.0`) als auch `graph deploy` (seit `v0.32.0`) akzeptieren zwei neue Optionen: ```sh -Options: +Optionen: ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") + --network Netzwerkkonfiguration, die aus der Netzwerkkonfigurationsdatei verwendet werden soll + --network-file Netzwerkkonfigurationsdateipfad (Standard: „./networks.json“) ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +Sie können die Option `--network` verwenden, um eine Netzwerkkonfiguration aus einer `json`-Standarddatei (standardmäßig `networks.json`) anzugeben, um Ihren Subgraphen während der Entwicklung einfach zu aktualisieren. -> Note: The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. +> Hinweis: Der Befehl `init` generiert nun automatisch eine `networks.json` auf der Grundlage der angegebenen Informationen. Sie können dann bestehende Netzwerke aktualisieren oder zusätzliche Netzwerke hinzufügen. 
-If you don't have a `networks.json` file, you'll need to manually create one with the following structure: +Wenn Sie keine \`networks.json'-Datei haben, müssen Sie manuell eine Datei mit der folgenden Struktur erstellen: ```json { - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) + "network1": { // der Netzwerkname + "dataSource1": { // der dataSource-Name + "address": "0xabc...", // die Vertragsadresse (optional) + "startBlock": 123456 // der startBlock (optional) }, "dataSource2": { "address": "0x123...", @@ -52,9 +52,9 @@ If you don't have a `networks.json` file, you'll need to manually create one wit } ``` -> Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. +> Hinweis: Sie müssen keine `templates` (falls Sie welche haben) in der Konfigurationsdatei angeben, nur die `dataSources`. Wenn in der Datei `subgraph.yaml` irgendwelche `templates` deklariert sind, wird ihr Netzwerk automatisch auf das mit der Option `--network` angegebene aktualisiert. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: +Nehmen wir an, Sie möchten Ihren Subgraphen in den Netzwerken `mainnet` und `sepolia` einsetzen, und dies ist Ihre `subgraph.yaml`: ```yaml # ... @@ -69,7 +69,7 @@ dataSources: kind: ethereum/events ``` -This is what your networks config file should look like: +So sollte Ihre Netzwerkkonfigurationsdatei aussehen: ```json { @@ -86,17 +86,17 @@ This is what your networks config file should look like: } ``` -Now we can run one of the following commands: +Jetzt können wir einen der folgenden Befehle ausführen: ```sh -# Using default networks.json file +# Verwendung der Standarddatei networks.json yarn build --network sepolia -# Using custom named file +# Verwendung einer benutzerdefinierten Datei yarn build --network sepolia --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +Der Befehl `build` aktualisiert die Datei `subgraph.yaml` mit der `sepolia`-Konfiguration und kompiliert den Subgraphen neu. Ihre `subgraph.yaml` Datei sollte nun wie folgt aussehen: ```yaml # ... @@ -111,23 +111,23 @@ dataSources: kind: ethereum/events ``` -Now you are ready to `yarn deploy`. +Jetzt sind Sie bereit für `yarn deploy`. 
-> Note: As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: +> Anmerkung: Wie bereits erwähnt, können Sie seit `graph-cli 0.32.0` direkt `yarn deploy` mit der Option `--network` ausführen: ```sh -# Using default networks.json file +# Verwendung der Standarddatei networks.json yarn deploy --network sepolia -# Using custom named file +# Verwendung einer benutzerdefinierten Datei yarn deploy --network sepolia --network-file path/to/config ``` -### Using subgraph.yaml template +### Verwendung der Vorlage subgraph.yaml -One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). +Eine Möglichkeit, Aspekte wie Vertragsadressen mit älteren `graph-cli` Versionen zu parametrisieren, besteht darin, Teile davon mit einem Templating-System wie [Mustache](https://mustache.github.io/) oder [Handlebars](https://handlebarsjs.com/) zu generieren. -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: +Zur Veranschaulichung dieses Ansatzes nehmen wir an, dass ein Subgraph im Mainnet und in Sepolia mit unterschiedlichen Vertragsadressen bereitgestellt werden soll. Sie könnten dann zwei Konfigurationsdateien definieren, die die Adressen für jedes Netz bereitstellen: ```json { @@ -136,7 +136,7 @@ To illustrate this approach, let's assume a subgraph should be deployed to mainn } ``` -and +und ```json { @@ -145,7 +145,7 @@ and } ``` -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: +Dazu ersetzen Sie den Netzwerknamen und die Adressen im Manifest durch die variablen Platzhalter `{{network}}` und `{{address}}` und benennen Sie das Manifest z.B. in `subgraph.template.yaml` um: ```yaml # ... @@ -162,7 +162,7 @@ dataSources: kind: ethereum/events ``` -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: +Um ein Manifest für eines der beiden Netzwerke zu erstellen, können Sie zwei zusätzliche Befehle in die `package.json` einfügen, zusammen mit einer Abhängigkeit von `mustache`: ```json { @@ -179,7 +179,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: +Um diesen Subgraphen für Mainnet oder Sepolia einzusetzen, führen Sie nun einfach einen der beiden folgenden Befehle aus: ```sh # Mainnet: @@ -189,29 +189,29 @@ yarn prepare:mainnet && yarn deploy yarn prepare:sepolia && yarn deploy ``` -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). +Ein funktionierendes Datenbeispiel hierfür finden Sie [hier](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. 
+**Hinweis:** Dieser Ansatz kann auch auf komplexere Situationen angewandt werden, in denen es notwendig ist, mehr als nur Vertragsadressen und Netzwerknamen zu ersetzen, oder in denen auch Mappings oder ABIs aus Vorlagen erzeugt werden. -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +Dies gibt Ihnen den `chainHeadBlock`, den Sie mit dem `latestBlock` Ihres Subgraphen vergleichen können, um zu prüfen, ob er im Rückstand ist. `synced` gibt Auskunft darüber, ob der Subgraph jemals zur Kette aufgeschlossen hat. `health` kann derzeit die Werte `healthy` annehmen, wenn keine Fehler aufgetreten sind, oder `failed`, wenn es einen Fehler gab, der den Fortschritt des Subgraphen aufgehalten hat. In diesem Fall können Sie das Feld `fatalError` auf Details zu diesem Fehler überprüfen. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraphen-Archivierungsrichtlinie -A subgraph version in Studio is archived if and only if it meets the following criteria: +Eine Subgraph-Version in Studio wird nur dann archiviert, wenn sie die folgenden Kriterien erfüllt: -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days +- Die Version ist nicht im Netz veröffentlicht (oder steht zur Veröffentlichung an) +- Die Version wurde vor 45 oder mehr Tagen erstellt +- Der Subgraph ist seit 30 Tagen nicht mehr abgefragt worden -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. +Wenn eine neue Version bereitgestellt wird und der Subgraph noch nicht veröffentlicht wurde, wird außerdem die Version N-2 des Subgraphen archiviert. -Every subgraph affected with this policy has an option to bring the version in question back. +Jeder Subgraph, der von dieser Richtlinie betroffen ist, hat die Möglichkeit, die betreffende Version zurückzubringen. -## Checking subgraph health +## Überprüfung des Zustands eines Subgraphen -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +Wenn ein Subgraph erfolgreich synchronisiert wird, ist das ein gutes Zeichen dafür, dass er für immer gut laufen wird. Neue Auslöser im Netzwerk könnten jedoch dazu führen, dass Ihr Subgraph auf eine ungetestete Fehlerbedingung stößt, oder er könnte aufgrund von Leistungsproblemen oder Problemen mit den Knotenbetreibern ins Hintertreffen geraten. -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
Here is an example query that checks the status of the current version of a subgraph: +Graph Node stellt einen GraphQL-Endpunkt zur Verfügung, den Sie abfragen können, um den Status Ihres Subgraphen zu überprüfen. Auf dem gehosteten Dienst ist er unter `https://api.thegraph.com/index-node/graphql` verfügbar. Auf einem lokalen Knoten ist er standardmäßig auf Port `8030/graphql` verfügbar. Das vollständige Schema für diesen Endpunkt finden Sie [hier](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Hier ist ein Datenbeispiel für eine Abfrage, die den Status der aktuellen Version eines Subgraphen überprüft: ```graphql { @@ -238,4 +238,4 @@ Graph Node exposes a GraphQL endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +Dies gibt Ihnen den `chainHeadBlock`, den Sie mit dem `latestBlock` Ihres Subgraphen vergleichen können, um zu prüfen, ob er im Rückstand ist. `synced` gibt Auskunft darüber, ob der Subgraph jemals zur Kette aufgeschlossen hat. `health` kann derzeit die Werte `healthy` annehmen, wenn keine Fehler aufgetreten sind, oder `failed`, wenn es einen Fehler gab, der den Fortschritt des Subgraphen aufgehalten hat. In diesem Fall können Sie das Feld `fatalError` auf Details zu diesem Fehler überprüfen. From 53685440ca294bde6bdfeccd27d542a62e9ca6d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:37 -0500 Subject: [PATCH 0891/1534] New translations multiple-networks.mdx (Russian) --- .../ru/subgraphs/developing/deploying/multiple-networks.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/ru/subgraphs/developing/deploying/multiple-networks.mdx index 1a4d1b0567db..8ae55fbd8bcc 100644 --- a/website/src/pages/ru/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/ru/subgraphs/developing/deploying/multiple-networks.mdx @@ -2,7 +2,7 @@ title: Развертывание субграфа в нескольких сетях --- -На этой странице объясняется, как развернуть субграф в нескольких сетях. Чтобы развернуть субграф, сначала установите [Graph CLI] (https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). Если Вы еще не создали субграф, смотрите раздел [Создание субграфа] (/developing/creating-a-subgraph/). +This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). 
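For reference, the global CLI install that paragraph refers to typically looks like this (package name per the linked repository; substitute the yarn equivalent if that is your package manager):

```sh
npm install -g @graphprotocol/graph-cli
```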
## Развертывание подграфа в нескольких сетях From 022c7d60206c0e464eef9ee2b6ca643a7b7fe1bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:38 -0500 Subject: [PATCH 0892/1534] New translations multiple-networks.mdx (Turkish) --- .../tr/subgraphs/developing/deploying/multiple-networks.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/tr/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/tr/subgraphs/developing/deploying/multiple-networks.mdx index 34ef341eed20..2241675eac10 100644 --- a/website/src/pages/tr/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/tr/subgraphs/developing/deploying/multiple-networks.mdx @@ -2,7 +2,7 @@ title: Bir Subgraph'i Birden Fazla Ağda Dağıtma --- -Bu sayfa, bir subgraph'i birden fazla ağda nasıl dağıtacağınızı açıklar. Bir subgraph'i dağıtmak için öncelikle [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli)'yi yüklemeniz gerekir. Henüz bir subgraph oluşturmadıysanız, [Subgraph Oluşturma](/developing/creating-a-subgraph/) bölümüne bakın. +This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). ## Subgraph'i Birden Fazla Ağda Dağıtma From 3bb59e9a8027b7b7d0ba98fe51b074e7c7285733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:42 -0500 Subject: [PATCH 0893/1534] New translations multiple-networks.mdx (Hindi) --- .../hi/subgraphs/developing/deploying/multiple-networks.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/hi/subgraphs/developing/deploying/multiple-networks.mdx b/website/src/pages/hi/subgraphs/developing/deploying/multiple-networks.mdx index e4de34cb2268..3e03014aba51 100644 --- a/website/src/pages/hi/subgraphs/developing/deploying/multiple-networks.mdx +++ b/website/src/pages/hi/subgraphs/developing/deploying/multiple-networks.mdx @@ -2,7 +2,7 @@ title: मल्टीपल नेटवर्क्स पर एक Subgraph डिप्लॉय करना --- -यह पृष्ठ कई नेटवर्क पर एक subgraph को डिप्लॉय करने के तरीके को समझाता है। एक subgraph को डिप्लॉय करने के लिए, आपको पहले Graph CLI(https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) स्थापित करना होगा। यदि आपने पहले से एक subgraph नहीं बनाया है, तो Creating a subgraph (/developing/creating-a-subgraph/). देखें। +This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph/). 
## सबग्राफ को कई नेटवर्क पर तैनात करना From 6b15dce385f03c695bf5579be50bcca78614a9b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:44 -0500 Subject: [PATCH 0894/1534] New translations subgraph-studio-faq.mdx (French) --- .../subgraphs/developing/deploying/subgraph-studio-faq.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/fr/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 1ace101654f2..10300b3d9ada 100644 --- a/website/src/pages/fr/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/fr/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: Subgraph Studio FAQ ## 1. Qu'est-ce que Subgraph Studio ? -[Subgraph Studio](https://thegraph.com/studio/) est une application pour créer, gérer et publier des subgraphs et des clés API. +[Subgraph Studio](https://thegraph.com/studio/) est une dapp permettant de créer, gérer et publier des subgraphs et des clés API. ## 2. Comment créer une clé API ? @@ -12,7 +12,7 @@ Pour créer une API, allez dans Subgraph Studio et connectez votre portefeuille. ## 3. Puis-je créer plusieurs clés API ? -Oui ! Vous pouvez créer plusieurs clés API pour les utiliser dans différents projets. Consultez le lien [ici](https://thegraph.com/studio/apikeys/). +Oui ! Vous pouvez créer plusieurs clés API à utiliser dans différents projets. Consultez le lien [ici](https://thegraph.com/studio/apikeys/). ## 4. Comment limiter un domaine pour une clé API ? ? @@ -26,6 +26,6 @@ Notez que vous ne pourrez plus voir ou modifier le subgraph dans Studio une fois ## Comment trouver les URL de requête pour les sugraphs si je ne suis pas le développeur du subgraph que je veux utiliser ? -Vous pouvez trouver l'URL de requête de chaque subgraph dans la section Subgraph Details de Graph Explorer. Lorsque vous cliquez sur le bouton "Query", vous serez dirigé vers un volet où vous pourrez voir l'URL de requête du subgraph qui vous intéresse. Vous pouvez ensuite remplacer le placeholder `` par la clé API que vous souhaitez utiliser dans Subgraph Studio. +Vous pouvez trouver l'URL de requête de chaque subgraph dans la section Détails du subgraph de Graph Explorer. Lorsque vous cliquez sur le bouton “Requête”, vous serez redirigé vers un volet dans lequel vous pourrez afficher l'URL de requête du subgraph qui vous intéresse. Vous pouvez ensuite remplacer le placeholder `` par la clé API que vous souhaitez exploiter dans Subgraph Studio. N'oubliez pas que vous pouvez créer une clé API et interroger n'importe quel subgraph publié sur le réseau, même si vous créez vous-même un subgraph. Ces requêtes via la nouvelle clé API, sont des requêtes payantes comme n'importe quelle autre sur le réseau. 
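The query URL described in this FAQ takes an API key in its path. Below is a minimal, hedged sketch of such a request from the command line: the URL shape matches what Explorer's query pane displays, `<API_KEY>` and `<SUBGRAPH_ID>` are placeholders you substitute, and `_meta` is a built-in field every subgraph exposes.

```sh
# Replace <API_KEY> and <SUBGRAPH_ID> with the values from your Studio / Explorer page.
curl -X POST "https://gateway.thegraph.com/api/<API_KEY>/subgraphs/id/<SUBGRAPH_ID>" \
  -H "Content-Type: application/json" \
  -d '{"query": "{ _meta { block { number } } }"}'
```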
From 58247863b6641130176088691920a9c8992c3849 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:45 -0500 Subject: [PATCH 0895/1534] New translations subgraph-studio-faq.mdx (Spanish) --- .../es/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/es/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 0c7b9c4610e1..14174cc468bf 100644 --- a/website/src/pages/es/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/es/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: Preguntas Frecuentes sobre Subgraph Studio ## 1. ¿Qué es Subgraph Studio? -[Subgraph Studio](https://thegraph.com/studio/) es una dapp para crear, administrar y publicar subgrafos y claves API. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. ¿Cómo creo una clave API? @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. ¿Puedo crear múltiples claves de API? -¡Sí! Puedes crear varias claves de API para usar en diferentes proyectos. Consulta el enlace [aquí](https://thegraph.com/studio/apikeys/). +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. ¿Cómo restrinjo un dominio para una clave API? From 3ac76dbe3f94d96e6577f251b03fe482050cdd19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:46 -0500 Subject: [PATCH 0896/1534] New translations subgraph-studio-faq.mdx (Czech) --- .../cs/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/cs/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 329cc1056022..a67af0f6505e 100644 --- a/website/src/pages/cs/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/cs/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: FAQs Podgraf Studio ## 1. Co je Podgraf Studio? -[Podgraf Studio](https://thegraph.com/studio/) je aplikace pro vytváření, správu a publikování podgrafů a klíčů API. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. Jak vytvořím klíč API? @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. Mohu vytvořit více klíčů API? -Ano! Můžete si vytvořit více klíčů API a používat je v různých projektech. Podívejte se na odkaz [zde](https://thegraph.com/studio/apikeys/). +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. Jak omezím doménu pro klíč API? 
From a6933470bac6e9c8ebf6ca77a9f44f6d9e36d247 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:47 -0500 Subject: [PATCH 0897/1534] New translations subgraph-studio-faq.mdx (German) --- .../developing/deploying/subgraph-studio-faq.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/de/subgraphs/developing/deploying/subgraph-studio-faq.mdx index b5b8cede7888..a6e114083fc7 100644 --- a/website/src/pages/de/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/de/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,28 +4,28 @@ title: Subgraph Studio-FAQs ## 1. Was ist Subgraph Studio? -[Subgraph Studio](https://thegraph.com/studio/) ist eine DApp zum Erstellen, Verwalten und Veröffentlichen von Subgrafen und API-Schlüsseln. +[Subgraph Studio] (https://thegraph.com/studio/) ist eine App zur Erstellung, Verwaltung und Veröffentlichung von Subgraphen und API-Schlüsseln. ## 2. Wie erstelle ich einen API-Schlüssel? -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. +Um eine API zu erstellen, navigieren Sie zu Subgraph Studio und verbinden Sie Ihre Wallet. Oben können Sie auf die Registerkarte API-Schlüssel klicken. Dort können Sie einen API-Schlüssel erstellen. ## 3. Kann ich mehrere API-Schlüssel erstellen? -Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). +Ja! Sie können mehrere API-Schlüssel zur Verwendung in verschiedenen Projekten erstellen. Sehen Sie sich den Link [hier] an (https://thegraph.com/studio/apikeys/). -## 4. How do I restrict a domain for an API Key? +## 4. Wie beschränke ich einen API Key auf eine Domain ein? Nachdem Sie einen API-Schlüssel erstellt haben, können Sie im Abschnitt Sicherheit die Domänen definieren, die einen bestimmten API-Schlüssel abfragen können. ## 5. Kann ich meinen Subgrafen an einen anderen Eigentümer übertragen? -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. +Ja, Subgraphen, die auf Arbitrum One veröffentlicht wurden, können auf eine neue Wallet oder eine Multisig übertragen werden. Klicken Sie dazu auf die drei Punkte neben der Schaltfläche „Veröffentlichen“ auf der Detailseite des Subgraphen und wählen Sie „Eigentum übertragen“. Beachten Sie, dass Sie den Subgrafen nach der Übertragung nicht mehr in Studio sehen oder bearbeiten können. -## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? +## 6. Wie finde ich Abfrage-URLs für Subgraphen, wenn ich kein Entwickler des Subgraphen bin, den ich verwenden möchte? -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. +Die Abfrage-URL eines jeden Subgraphen finden Sie im Abschnitt Subgraph Details des Graph Explorers. 
Wenn Sie auf die Schaltfläche „Abfrage“ klicken, werden Sie zu einem Fenster weitergeleitet, in dem Sie die Abfrage-URL des gewünschten Subgraphen sehen können. Sie können dann den `` Platzhalter durch den API-Schlüssel ersetzen, den Sie in Subgraph Studio verwenden möchten. Denken Sie daran, dass Sie einen API-Schlüssel erstellen und jeden im Netzwerk veröffentlichten Subgrafen abfragen können, auch wenn Sie selbst einen Subgrafen erstellen. Diese Abfragen über den neuen API-Schlüssel sind wie alle anderen im Netzwerk kostenpflichtige Abfragen. From 188fed5d194931db54a72a2969e1b971d12a6473 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:48 -0500 Subject: [PATCH 0898/1534] New translations subgraph-studio-faq.mdx (Italian) --- .../it/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/it/subgraphs/developing/deploying/subgraph-studio-faq.mdx index cb695832e258..66453e221c08 100644 --- a/website/src/pages/it/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/it/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: FAQ di Subgraph Studio ## 1. Che cos'è Subgraph Studio? -[Subgraph Studio](https://thegraph.com/studio/) è una dapp per creare, gestire e pubblicare subgraph e chiavi API. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. Come si crea una chiave API? @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. Posso creare più chiavi API? -Sì, è possibile creare più chiavi API da utilizzare in diversi progetti. Scoprire sul link [qui](https://thegraph.com/studio/apikeys/). +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. Come si limita un dominio per una chiave API? From b2a2efc51e9670fb725f82adaf02fb49dfd578e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:49 -0500 Subject: [PATCH 0899/1534] New translations subgraph-studio-faq.mdx (Japanese) --- .../ja/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/ja/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 18fa824a4d06..5810742c4ec4 100644 --- a/website/src/pages/ja/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/ja/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: サブグラフスタジオFAQ ## 1. サブグラフスタジオとは? -[Subgraph Studio](https://thegraph.com/studio/)は、サブグラフやAPIキーを作成・管理・公開するためのDappであり、サブグラフの作成・管理・公開を行う。 +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. API キーを作成するにはどうすればよいですか? @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. 複数の API キーを作成できますか? -A: はい、できます。異なるプロジェクトで使用するために、[こちら](https://thegraph.com/studio/apikeys/)のリンクをご確認ください。 +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. API キーのドメインを制限するにはどうすればよいですか? 
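The query URL covered in these FAQ translations accepts a plain GraphQL body. A generic, hedged example follows; entity and field names depend entirely on the subgraph's own schema, and `gravatars` is borrowed from the Gravatar example used earlier in this series.

```graphql
{
  gravatars(first: 5) {
    id
    displayName
    imageUrl
  }
}
```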
From 2c7aa90f25d2fa8e646585ca83e816e6331a70c4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 13:00:52 -0500
Subject: [PATCH 0900/1534] New translations subgraph-studio-faq.mdx (Portuguese)

---
 .../subgraphs/developing/deploying/subgraph-studio-faq.mdx | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/website/src/pages/pt/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/pt/subgraphs/developing/deploying/subgraph-studio-faq.mdx
index 512502e6d5d4..57c66e49c2e0 100644
--- a/website/src/pages/pt/subgraphs/developing/deploying/subgraph-studio-faq.mdx
+++ b/website/src/pages/pt/subgraphs/developing/deploying/subgraph-studio-faq.mdx
@@ -4,7 +4,7 @@ title: Perguntas Frequentes do Subgraph Studio

## 1. O que é o Subgraph Studio?

-O [Subgraph Studio](https://thegraph.com/studio/) é um dapp para criar, gerir e editar subgraphs e chaves de API.
+[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys.

## 2. Como criar uma Chave de API?

@@ -12,7 +12,7 @@ Para criar uma API, navegue até o Subgraph Studio e conecte a sua carteira. Log

## 3. Posso criar várias Chaves de API?

-Sim! Pode criar mais de uma Chave de API para usar em projetos diferentes. Confira [aqui](https://thegraph.com/studio/apikeys/).
+Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/).

## 4. Como restringir um domínio para uma Chave de API?

@@ -26,6 +26,6 @@ Note que após a transferência, não poderá mais ver ou alterar o subgraph no

## 6. Se eu não for o programador do subgraph que quero usar, como encontro URLs de query para subgraphs?

-A URL de query para cada subgraph está na seção Subgraph Details (Detalhes de Subgraph) do The Graph Explorer. O botão “Query” (Consulta) te levará a um painel com a URL de query do subgraph de seu interesse. Você pode então substituir o espaço `<api-key>` com a chave de API que quer usar no Subgraph Studio.
+You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio.

Lembre-se que, mesmo se construir um subgraph por conta própria, ainda poderá criar uma chave de API e consultar qualquer subgraph publicado na rede. Estes queries através da nova chave API são pagos, como quaisquer outros na rede.

From f1856c24943b39a77b8843b5f53ad99ecdd7157d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?=
Date: Fri, 14 Feb 2025 13:00:53 -0500
Subject: [PATCH 0901/1534] New translations subgraph-studio-faq.mdx (Russian)

---
 .../subgraphs/developing/deploying/subgraph-studio-faq.mdx | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/website/src/pages/ru/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/ru/subgraphs/developing/deploying/subgraph-studio-faq.mdx
index 2ca6f958d83e..4e0eee2dba2d 100644
--- a/website/src/pages/ru/subgraphs/developing/deploying/subgraph-studio-faq.mdx
+++ b/website/src/pages/ru/subgraphs/developing/deploying/subgraph-studio-faq.mdx
@@ -4,7 +4,7 @@ title: Часто задаваемые вопросы о Subgraph Studio

## 1. Что такое Subgraph Studio?
-[Subgraph Studio](https://thegraph.com/studio/) — это децентрализованное приложение для создания, управления и публикации субграфов и ключей API. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. Как создать ключ API? @@ -12,7 +12,7 @@ title: Часто задаваемые вопросы о Subgraph Studio ## 3. Могу ли я создать несколько ключей API? -Да! Вы можете создать несколько ключей API для использования в разных проектах. Перейдите по этой [ссылке](https://thegraph.com/studio/apikeys/). +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. Как мне настроить ограничения домена для ключа API? @@ -26,6 +26,6 @@ title: Часто задаваемые вопросы о Subgraph Studio ## 6. Как мне найти URL-адреса запросов для субграфов, если я не являюсь разработчиком субграфа, который хочу использовать? -Вы можете найти URL-адрес запроса каждого субграфа в разделе «Сведения о Субграфе» в Graph Explorer. После нажатия на кнопку «Запрос» Вы будете перенаправлены на панель, где сможете увидеть URL-адрес запроса интересующего Вас субграфа. Затем Вы можете заменить заполнитель `` ключом API, который хотите использовать в Subgraph Studio. +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. Помните, что Вы можете создать ключ API и запрашивать любой субграф, опубликованный в сети, даже если сами создаете субграф. Эти запросы через новый ключ API являются платными, как и любые другие в сети. From 94a59b9c64be876b81d817702dbdd957a73ea049 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:54 -0500 Subject: [PATCH 0902/1534] New translations subgraph-studio-faq.mdx (Swedish) --- .../sv/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/sv/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 27af4467124b..f2d35d39c1ee 100644 --- a/website/src/pages/sv/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/sv/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: Vanliga frågor om Subgraf Studio ## 1. Vad är Subgraf Studio? -[Subgraf Studio](https://thegraph.com/studio/) är en dapp för att skapa, hantera och publicera undergrafer och API-nycklar. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. Hur skapar jag en API-nyckel? @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. Kan jag skapa flera API-nycklar? -Ja, du kan skapa flera API-nycklar som du kan använda i olika projekt. Kolla in länken[ här](https://thegraph.com/studio/apikeys/). +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. Hur begränsar jag en domän för en API-nyckel? 
From 2a0ae448b10d5c5eb7042aaa92888d76060e7837 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:55 -0500 Subject: [PATCH 0903/1534] New translations subgraph-studio-faq.mdx (Turkish) --- .../subgraphs/developing/deploying/subgraph-studio-faq.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/tr/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 8213fdbbac4a..dc5b2fb87f0a 100644 --- a/website/src/pages/tr/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/tr/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: Subgrap Studio Hakkında SSS ## 1. Subgraph Stüdyo Nedir? -[Subgraph Studio](https://thegraph.com/studio/), subgraph'ler ve API anahtarları oluşturmak, yönetmek ve yayımlamaya yarayan bir dapp'tir. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. API Anahtarını Nasıl Oluşturabilirim? @@ -12,7 +12,7 @@ Bir API oluşturmak için Subgraph Studio'ya gidin ve cüzdanınızı bağlayın ## 3. Birden Çok API Anahtarı Oluşturabilir miyim? -Evet! Farklı projelerde kullanmak için birden fazla API anahtarı oluşturabilirsiniz. Daha fazla bilgi için [buraya](https://thegraph.com/studio/apikeys/) göz atın. +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. API Anahtarı için Domain'i Nasıl Kısıtlarım? @@ -26,6 +26,6 @@ Subgraph'i devrettikten sonra onu Studio'da artık göremeyeceğinizi veya düze ## 6. Kullanmak İstediğim Subgraph'ın Geliştiricisi Değilsem, bu Subgraphlar için Sorgu URL'lerini Nasıl Bulabilirim? -Her bir subgraph'in sorgu URL'sini Graph Gezgini'ndeki Subgraph Ayrıntıları bölümünde bulabilirsiniz. “Sorgula” düğmesine tıkladığınızda, ilgilendiğiniz subgraph'in sorgu URL'sini görüntüleyebileceğiniz bir panele yönlendirilirsiniz. Ardından, `` yer tutucusunu Subgraph Studio’da kullanmak istediğiniz API anahtarı ile değiştirebilirsiniz. +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. Unutmayın, bir API anahtarı oluşturarak ağda yayımlanmış herhangi bir subgraph'i sorgulayabilirsiniz; bu durum, kendi subgraph'inizi oluşturmuş olsanız bile geçerlidir. Bu yeni API anahtarı üzerinden yapılan sorgular, ağdaki diğer sorgular gibi ücretlidir. From 87051bf484ea50cfbcf0e0f1c8b58cd6701f9739 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:56 -0500 Subject: [PATCH 0904/1534] New translations subgraph-studio-faq.mdx (Chinese Simplified) --- .../zh/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/zh/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 7365bb62a3d8..b8d40e2f2dc3 100644 --- a/website/src/pages/zh/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/zh/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: 子图工作室常见问题 ## 1. 什么是 Subgraph Studio? 
-[子图工作室](https://thegraph.com/studio/)是一个用于创建、管理和发布子图和 API 密钥的 dapp。 +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. 如何创建 API 密钥? @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. 我可以创建多个 API 密钥吗? -是的! 您可以创建多个 API 密钥,以便在不同的项目中使用。查看[此处](https://thegraph.com/studio/apikeys/)的链接。 +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. 如何为 API 密钥限制域? From 61b1077f2c3b48624ced3facf8d9ccf45a3fbc31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:57 -0500 Subject: [PATCH 0905/1534] New translations subgraph-studio-faq.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/ur/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 424c64245d7d..3edb3d799ad3 100644 --- a/website/src/pages/ur/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/ur/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: سب گراف سٹوڈیو کے اکثر پوچھے گئے سوالات ## 1. سب گراف سٹوڈیو کیا ہے؟ -[سب گراف سٹوڈیو](https://thegraph.com/studio/) سب گراف اور API کیز بنانے، ان کا نظم کرنے اور شائع کرنے کے لیے ایک ڈیپ ہے. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. میں ایک API کلید کیسے بنا سکتا ہوں؟ @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. کیا میں ایک سے زیادہ API کلیدیں بنا سکتا ہوں؟ -جی ہاں! آپ مختلف پروجیکٹس میں استعمال کرنے کے لیے متعدد API کلیدیں بنا سکتے ہیں۔ لنک [یہاں](https://thegraph.com/studio/apikeys/) دیکھیں. +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. میں API کلید کے لیے ڈومین کو کیسے محدود کروں؟ From 8b032a4ae9d98130a805bc7118a39ed2550f0f17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:00:59 -0500 Subject: [PATCH 0906/1534] New translations subgraph-studio-faq.mdx (Marathi) --- .../mr/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/mr/subgraphs/developing/deploying/subgraph-studio-faq.mdx index badcf3fee7c6..f5729fb6cfa8 100644 --- a/website/src/pages/mr/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/mr/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: सबग्राफ स्टुडिओ FAQ ## 1. सबग्राफ स्टुडिओ म्हणजे काय? -[सबग्राफ स्टुडिओ](https://thegraph.com/studio/) हे सबग्राफ आणि API की तयार करण्यासाठी, व्यवस्थापित करण्यासाठी आणि प्रकाशित करण्यासाठी एक डॅप आहे. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. मी API की कशी तयार करू? @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. मी एकाधिक API की तयार करू शकतो? -होय! तुम्ही वेगवेगळ्या प्रकल्पांमध्ये वापरण्यासाठी एकाधिक API की तयार करू शकता. लिंक [येथे](https://thegraph.com/studio/apikeys/) पहा. +Yes! 
You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. मी API की साठी डोमेन कसे प्रतिबंधित करू? From 00e3060783d03daa5dc689d74136203f6de0a54e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:00 -0500 Subject: [PATCH 0907/1534] New translations subgraph-studio-faq.mdx (Hindi) --- .../hi/subgraphs/developing/deploying/subgraph-studio-faq.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/hi/subgraphs/developing/deploying/subgraph-studio-faq.mdx index 9145a176e333..9901cc26d73f 100644 --- a/website/src/pages/hi/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/hi/subgraphs/developing/deploying/subgraph-studio-faq.mdx @@ -4,7 +4,7 @@ title: सबग्राफ स्टूडियो अक्सर पूछ ## 1. सबग्राफ स्टूडियो क्या है? -[सबग्राफ स्टूडियो](https://thegraph.com/studio/) सबग्राफ और एपीआई key बनाने, प्रबंधित करने और प्रकाशित करने के लिए एक डैप है। +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. ## 2. मैं एक एपीआई कुंजी कैसे बना सकता हूँ? @@ -12,7 +12,7 @@ To create an API, navigate to Subgraph Studio and connect your wallet. You will ## 3. क्या मैं कई एपीआई कुंजियां बना सकता हूं? -हाँ! आप विभिन्न परियोजनाओं में उपयोग करने के लिए कई एपीआई keys बना सकते हैं। [यहां](https://thegraph.com/studio/apikeys/) लिंक देखें। +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. मैं एपीआई कुंजी के लिए डोमेन को कैसे प्रतिबंधित करूं? From 8e6df69f82e443300aa9c7dc27349c9c98f3e3f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:01 -0500 Subject: [PATCH 0908/1534] New translations using-subgraph-studio.mdx (Romanian) --- .../developing/deploying/using-subgraph-studio.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ro/subgraphs/developing/deploying/using-subgraph-studio.mdx index 014299d9944d..634c2700ba68 100644 --- a/website/src/pages/ro/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ro/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -39,7 +39,7 @@ yarn global add @graphprotocol/graph-cli npm install -g @graphprotocol/graph-cli ``` -## Începe +## Get Started 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). 
- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). From c24da151066018421f045b676ecd2df5a962007b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:02 -0500 Subject: [PATCH 0909/1534] New translations using-subgraph-studio.mdx (French) --- .../developing/deploying/using-subgraph-studio.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/fr/subgraphs/developing/deploying/using-subgraph-studio.mdx index 0a23fac54c53..f4e354e2bb21 100644 --- a/website/src/pages/fr/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/fr/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -4,7 +4,7 @@ title: Déploiement en utilisant Subgraph Studio Apprenez à déployer votre subgraph sur Subgraph Studio. -> Note : Lorsque vous déployez un subgraph, vous le poussez vers Subgraph Studio, où vous pourrez le tester. Il est important de se rappeler que déployer n'est pas la même chose que publier. Lorsque vous publiez un subgraph, vous le publiez sur la blockchain. +> Remarque : lorsque vous déployez un subgraph, vous le transférez vers Subgraph Studio, où vous pourrez le tester. Il est important de se rappeler que le déploiement n'est pas la même chose que la publication. Lorsque vous publiez un subgraph, vous le publiez onchain. ## Présentation de Subgraph Studio @@ -30,7 +30,7 @@ Vous devez avoir [Node.js](https://nodejs.org/) et un gestionnaire de packages d ### Installation avec yarn ```bash -npm install -g @graphprotocol/graph-cli +yarn global add @graphprotocol/graph-cli ``` ### Installation avec npm @@ -53,7 +53,7 @@ npm install -g @graphprotocol/graph-cli -> Pour des informations supplémentaires écrites, consultez le [Quick Start](/subgraphs/quick-start/). +> Pour plus de détails écrits, consultez le [Démarrage rapide](/subgraphs/quick-start/). 
### Compatibilité des subgraphs avec le réseau de The Graph @@ -86,7 +86,7 @@ Avant de pouvoir déployer votre subgraph sur Subgraph Studio, vous devez vous c Ensuite, utilisez la commande suivante pour vous authentifier depuis la CLI : ```bash -graph auth +graph auth ``` ## Déploiement d'un Subgraph @@ -114,7 +114,7 @@ Utilisez Subgraph Studio pour vérifier les journaux (logs) sur le tableau de bo ## Publiez votre subgraph -Pour publier votre subgraph avec succès, consultez la page [publier un subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +Afin de publier votre subgraph avec succès, consultez [publier un subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Versionning de votre subgraph avec le CLI @@ -124,9 +124,9 @@ Si vous souhaitez mettre à jour votre subgraph, vous pouvez faire ce qui suit : - Une fois que vous en êtes satisfait, vous pouvez publier votre nouveau déploiement sur [Graph Explorer](https://thegraph.com/explorer). - Cette action créera une nouvelle version de votre subgraph sur laquelle les Curateurs pourront commencer à signaler et que les Indexeurs pourront indexer. -Vous pouvez également mettre à jour les métadonnées de votre subgraph sans publier une nouvelle version. Vous pouvez mettre à jour les détails de votre subgraph dans Studio (sous la photo de profil, le nom, la description, etc.) en cochant une option appelée **Update Details** dans [Graph Explorer](https://thegraph.com/explorer). Si cette option est cochée, une transaction sera générée sur la blockchain (on-chain) pour mettre à jour les détails du subgraph dans Explorer sans avoir à publier une nouvelle version avec un nouveau déploiement. +Vous pouvez également mettre à jour les métadonnées de votre subgraph sans publier de nouvelle version. Vous pouvez mettre à jour les détails de votre subgraph dans Studio (sous la photo de profil, le nom, la description, etc.) en cochant une option appelée **Mettre à jour les détails** dans [Graph Explorer](https://thegraph.com/explorer). Si cette option est cochée, une transaction onchain sera générée qui mettra à jour les détails du subgraph dans Explorer sans avoir à publier une nouvelle version avec un nouveau déploiement. -> Remarque : Il y a des coûts associés à la publication d'une nouvelle version d'un subgraph sur le réseau. En plus des frais de transaction, vous devez également financer une partie de la taxe de curation sur le signal d'auto-migration . Vous ne pouvez pas publier une nouvelle version de votre subgraph si les Curateurs n'ont pas signalé dessus. Pour plus d'informations, veuillez lire plus [ici](/resources/roles/curating/). +> Remarque : la publication d'une nouvelle version d'un subgraph sur le réseau entraîne des coûts. En plus des frais de transaction, vous devez également financer une partie de la taxe de curation sur le signal de migration automatique. Vous ne pouvez pas publier une nouvelle version de votre subgraph si les Curateurs ne l'ont pas signalé. Pour plus d'informations, veuillez lire la suite [ici](/resources/roles/curating/). 
## Archivage automatique des versions de subgraphs From de5ae4bfec7575c8338017ec91cbc4747758a748 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:03 -0500 Subject: [PATCH 0910/1534] New translations using-subgraph-studio.mdx (Spanish) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/es/subgraphs/developing/deploying/using-subgraph-studio.mdx index e1117a1ad51f..11e4e4c22495 100644 --- a/website/src/pages/es/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/es/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From 7b4926a0785b8b8393c33cc7371f05117fdc27b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:04 -0500 Subject: [PATCH 0911/1534] New translations using-subgraph-studio.mdx (Arabic) --- .../developing/deploying/using-subgraph-studio.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ar/subgraphs/developing/deploying/using-subgraph-studio.mdx index 6b55625bb64a..d8880ef1a196 100644 --- a/website/src/pages/ar/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ar/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -39,7 +39,7 @@ yarn global add @graphprotocol/graph-cli npm install -g @graphprotocol/graph-cli ``` -## البدء +## Get Started 1. Open [Subgraph Studio](https://thegraph.com/studio/). 2. Connect your wallet to sign in. @@ -63,7 +63,7 @@ In order to be supported by Indexers on The Graph Network, subgraphs must: - يجب ألا تستخدم أيًا من الميزات التالية: - ipfs.cat & ipfs.map - أخطاء غير فادحة - - تطعيم(Grafting) + - Grafting ## Initialize Your Subgraph @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From 52a4169d228a7af8fce2fef8b47abb6aebfe5310 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:05 -0500 Subject: [PATCH 0912/1534] New translations using-subgraph-studio.mdx (Czech) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/cs/subgraphs/developing/deploying/using-subgraph-studio.mdx index b5336cdc1f95..7c53f174237a 100644 --- a/website/src/pages/cs/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/cs/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From 4546095338538f44d64a2aa40ce1cfe078137981 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:07 -0500 Subject: [PATCH 0913/1534] New translations using-subgraph-studio.mdx (German) --- .../deploying/using-subgraph-studio.mdx | 94 +++++++++---------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/de/subgraphs/developing/deploying/using-subgraph-studio.mdx index 37add19071c4..b559bcdff049 100644 --- a/website/src/pages/de/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/de/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -2,30 +2,30 @@ title: Bereitstellung mit Subgraph Studio --- -Erfahren Sie, wie Sie Ihren Subgraph in Subgraph Studio bereitstellen können. +Erfahren Sie, wie Sie Ihren Subgraphen in Subgraph Studio bereitstellen können. -> Hinweis: Wenn Sie einen Subgraph bereitstellen, schieben Sie ihn zu Subgraph Studio, wo Sie ihn testen können. Es ist wichtig zu wissen, dass Bereitstellen nicht dasselbe ist wie Veröffentlichen. Wenn Sie einen Subgraph veröffentlichen, dann veröffentlichen Sie ihn in der Kette. +> Hinweis: Wenn Sie einen Subgraphen bereitstellen, schieben Sie ihn zu Subgraph Studio, wo Sie ihn testen können. Es ist wichtig, daran zu denken, dass Bereitstellen nicht dasselbe ist wie Veröffentlichen. Wenn Sie einen Subgraphen veröffentlichen, dann veröffentlichen Sie ihn onchain. ## Subgraph Studio Überblick In [Subgraph Studio] (https://thegraph.com/studio/) können Sie Folgendes tun: -- Eine Liste der von Ihnen erstellten Subgraphs anzeigen -- Verwalten, Anzeigen von Details und Visualisieren des Status eines bestimmten Subgraphen -- Erstellen und verwalten Sie Ihre API-Schlüssel für bestimmte Subgraphen -- Schränken Sie Ihre API-Schlüssel auf bestimmte Domains ein und erlauben Sie nur bestimmten Indexern die Abfrage mit diesen Schlüsseln -- Erstellen Sie Ihren Subgraph -- Verteilen Sie Ihren Subgraph mit The Graph CLI -- Testen Sie Ihren Subgraph in der „Playground“-Umgebung -- Integrieren Sie Ihren Subgraph in Staging unter Verwendung der Entwicklungsabfrage-URL -- Veröffentlichen Sie Ihren Subgraph auf The Graph Network -- Verwalten Sie Ihre Rechnungen +- Eine Liste der von Ihnen erstellten Subgraphen anzeigen +- Verwalten, Details anzeigen und den Status eines bestimmten Subgraphen visualisieren +- Ihre API-Schlüssel für bestimmte Subgraphen erstellen und verwalten +- Ihre API-Schlüssel auf bestimmte Domains einschränken und nur bestimmten Indexern die Abfrage mit diesen Schlüsseln erlauben +- Ihren Subgraphen erstellen +- Ihren Subgraphen mit The Graph CLI verteilen +- Ihren Subgraphen in der „Playground“-Umgebung testen +- Ihren Subgraphen in Staging unter Verwendung der Entwicklungsabfrage-URL integrieren +- Ihren Subgraphen auf The Graph Network veröffentlichen +- Ihre Rechnungen verwalten -## Installieren der Graph-CLI +## Installieren der The Graph-CLI Vor der Bereitstellung müssen Sie The Graph CLI installieren. -Sie müssen [Node.js](https://nodejs.org/) und einen Paketmanager Ihrer Wahl (`npm`, `yarn` oder `pnpm`) installiert haben, um The Graph CLI zu verwenden. Prüfen Sie, ob die [aktuellste](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI-Version installiert ist. 
+Sie müssen [Node.js](https://nodejs.org/) und einen Paketmanager Ihrer Wahl (`npm`, `yarn` oder `pnpm`) installiert haben, um The Graph-CLI zu verwenden. Prüfen Sie, ob die [aktuellste](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI-Version installiert ist. ### Installieren mit yarn @@ -39,28 +39,28 @@ yarn global add @graphprotocol/graph-cli npm install -g @graphprotocol/graph-cli ``` -## Los geht’s +## Erste Schritte 1. Öffnen Sie [Subgraph Studio] (https://thegraph.com/studio/). 2. Verbinden Sie Ihre Wallet, um sich anzumelden. - Sie können dies über MetaMask, Coinbase Wallet, WalletConnect oder Safe tun. 3. Nachdem Sie sich angemeldet haben, wird Ihr eindeutiger Verteilungsschlüssel auf der Detailseite Ihres Subgraphen angezeigt. - - Mit dem Bereitstellungsschlüssel können Sie Ihre Subgraphs veröffentlichen oder Ihre API-Schlüssel und Abrechnungen verwalten. Er ist einmalig, kann aber neu generiert werden, wenn Sie glauben, dass er kompromittiert wurde. + - Mit dem Bereitstellungsschlüssel können Sie Ihre Subgraphen veröffentlichen oder Ihre API-Schlüssel und Abrechnungen verwalten. Er ist einmalig, kann aber neu generiert werden, wenn Sie glauben, dass er kompromittiert wurde. -> Wichtig: Sie benötigen einen API-Schlüssel, um Subgraphs abzufragen +> Wichtig: Sie benötigen einen API-Schlüssel, um Subgraphen abzufragen -### So erstellen Sie einen Subgraph in Subgraph Studio +### So erstellen Sie einen Subgraphen in Subgraph Studio -> Weitere schriftliche Informationen finden Sie im [Schnellstart](/subgraphs/quick-start/). +> Weitere schriftliche Details finden Sie im [Quick Start](/subgraphs/quick-start/). -### Kompatibilität von Subgraphs mit dem The Graph Network +### Kompatibilität von Subgraphen mit dem The Graph Network Um von Indexern auf The Graph Network unterstützt zu werden, müssen Subgraphen: -- Ein [unterstütztes Netzwerk] indizieren (/supported-networks/) -- Sie dürfen keine der folgenden Funktionen verwenden: +- Ein [unterstütztes Netzwerk](/supported-networks/) indizieren +- Keine der folgenden Funktionen verwenden: - ipfs.cat & ipfs.map - Non-fatal errors - Grafting @@ -73,15 +73,15 @@ Sobald Ihr Subgraph in Subgraph Studio erstellt wurde, können Sie seinen Code graph init ``` -Sie finden den Wert `` auf der Detailseite Ihres Subgraphs in Subgraph Studio, siehe Abbildung unten: +Sie finden den Wert `` auf der Detailseite Ihres Subgraphen in Subgraph Studio, siehe Abbildung unten: ![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -Nachdem Sie `graph init` ausgeführt haben, werden Sie aufgefordert, die Vertragsadresse, das Netzwerk und eine ABI einzugeben, die Sie abfragen möchten. Daraufhin wird ein neuer Ordner auf Ihrem lokalen Rechner erstellt, der einige grundlegende Codes enthält, um mit der Arbeit an Ihrem Subgraph zu beginnen. Anschließend können Sie Ihren Subgraph fertigstellen, um sicherzustellen, dass er wie erwartet funktioniert. +Nachdem Sie `graph init` ausgeführt haben, werden Sie aufgefordert, die Vertragsadresse, das Netzwerk und eine ABI einzugeben, die Sie abfragen möchten. Daraufhin wird ein neuer Ordner auf Ihrem lokalen Computer erstellt, der einige grundlegende Code enthält, um mit der Arbeit an Ihrem Subgraphen zu beginnen. Anschließend können Sie Ihren Subgraphen fertigstellen, um sicherzustellen, dass er wie erwartet funktioniert. ## Graph Auth -Bevor Sie Ihren Subgraph in Subgraph Studio bereitstellen können, müssen Sie sich bei Ihrem Konto in der CLI anmelden. 
Dazu benötigen Sie Ihren Bereitstellungsschlüssel, den Sie auf der Seite mit den Details Ihres Subgraphen finden. +Bevor Sie Ihren Subgraphen in Subgraph Studio bereitstellen können, müssen Sie sich bei Ihrem Konto in der CLI anmelden. Dazu benötigen Sie Ihren Bereitstellungsschlüssel, den Sie auf der Seite mit den Details Ihres Subgraphen finden. Verwenden Sie dann den folgenden Befehl, um sich über die CLI zu authentifizieren: @@ -89,49 +89,49 @@ Verwenden Sie dann den folgenden Befehl, um sich über die CLI zu authentifizier graph auth ``` -## Bereitstellen eines Subgraphs +## Bereitstellen eines Subgraphen -Once you are ready, you can deploy your subgraph to Subgraph Studio. +Sobald Sie fertig sind, können Sie Ihren Subgraphen an Subgraph Studio übergeben. -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. +> Wenn Sie einen Subgraphen über die Befehlszeilenschnittstelle bereitstellen, wird er in das Studio übertragen, wo Sie ihn testen und die Metadaten aktualisieren können. Bei dieser Aktion wird Ihr Subgraph nicht im dezentralen Netzwerk veröffentlicht. -Use the following CLI command to deploy your subgraph: +Verwenden Sie den folgenden CLI-Befehl, um Ihren Subgraphen bereitzustellen: ```bash graph deploy ``` -After running this command, the CLI will ask for a version label. +Nach der Ausführung dieses Befehls wird die CLI nach einer Versionsbezeichnung fragen. -- It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as `v1`, `version1`, or `asdf`. -- The labels you create will be visible in Graph Explorer and can be used by curators to decide if they want to signal on a specific version or not, so choose them wisely. +- Es wird dringend empfohlen, [semver](https://semver.org/) für die Versionierung wie `0.0.1` zu verwenden. Es steht Ihnen jedoch frei, eine beliebige Zeichenkette als Version zu verwenden, z. B. `v1`, `version1` oder `asdf`. +- Die von Ihnen erstellten Labels sind im Graph Explorer sichtbar und können von den Kuratoren verwendet werden, um zu entscheiden, ob sie eine bestimmte Version signalisieren wollen oder nicht, also wählen Sie sie mit Bedacht. -## Testing Your Subgraph +## Testen Ihres Subgraphen -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. +Nach der Bereitstellung können Sie Ihren Subgraphen testen (entweder in Subgraph Studio oder in Ihrer eigenen Anwendung, mit der Bereitstellungsabfrage-URL), eine weitere Version bereitstellen, die Metadaten aktualisieren und im [Graph Explorer](https://thegraph.com/explorer) veröffentlichen, wenn Sie bereit sind. -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. +Verwenden Sie Subgraph Studio, um die Protokolle auf dem Dashboard zu überprüfen und nach Fehlern in Ihrem Subgraphen zu suchen. -## Publish Your Subgraph +## Veröffentlichung Ihres Subgraphen -In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). +Um Ihren Subgraphen erfolgreich zu veröffentlichen, lesen Sie [Veröffentlichen eines Subgraphen](/subgraphs/developing/publishing/publishing-a-subgraph/). 
-## Versioning Your Subgraph with the CLI +## Versionierung Ihres Subgraphen mit der CLI -If you want to update your subgraph, you can do the following: +Wenn Sie Ihren Subgraphen aktualisieren möchten, können Sie wie folgt vorgehen: -- You can deploy a new version to Studio using the CLI (it will only be private at this point). -- Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. +- Sie können eine neue Version über die Befehlszeilenschnittstelle (CLI) in Studio bereitstellen (zu diesem Zeitpunkt ist sie nur privat). +- Wenn Sie damit zufrieden sind, können Sie Ihre neue Bereitstellung im [Graph Explorer] (https://thegraph.com/explorer) veröffentlichen. +- Mit dieser Aktion wird eine neue Version Ihres Subgraphen erstellt, die von Kuratoren mit Signalen versehen und von Indexern indiziert werden kann. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +Sie können auch die Metadaten Ihres Subgraphen aktualisieren, ohne eine neue Version zu veröffentlichen. Sie können Ihre Subgraph-Details in Studio (unter dem Profilbild, dem Namen, der Beschreibung usw.) aktualisieren, indem Sie eine Option namens **Details aktualisieren** im [Graph Explorer] (https://thegraph.com/explorer) aktivieren. Wenn diese Option aktiviert ist, wird eine Onchain-Transaktion generiert, die die Subgraph-Details im Explorer aktualisiert, ohne dass eine neue Version mit einer neuen Bereitstellung veröffentlicht werden muss. -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). +> Hinweis: Die Veröffentlichung einer neuen Version eines Subgraphen im Netz ist mit Kosten verbunden. Zusätzlich zu den Transaktionsgebühren müssen Sie auch einen Teil der Kurationssteuer für das Auto-Migrations-Signal finanzieren. Sie können keine neue Version Ihres Subgraphen veröffentlichen, wenn Kuratoren nicht darauf signalisiert haben. Für weitere Informationen, lesen Sie bitte [hier](/resources/roles/curating/). -## Automatic Archiving of Subgraph Versions +## Automatische Archivierung von Subgraph-Versionen -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. +Immer wenn Sie eine neue Subgraph-Version in Subgraph Studio bereitstellen, wird die vorherige Version archiviert. Archivierte Versionen werden nicht indiziert/synchronisiert und können daher nicht abgefragt werden. Sie können die Archivierung einer archivierten Version Ihres Subgraphen in Subgraph Studio dearchivieren. 
-> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. +> Hinweis: Frühere Versionen von nicht veröffentlichten Subgraphen, die in Studio bereitgestellt wurden, werden automatisch archiviert. ![Subgraph Studio - Unarchive](/img/Unarchive.png) From 8ca256757c718ca9343bfd9de4e861f14172d2a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:08 -0500 Subject: [PATCH 0914/1534] New translations using-subgraph-studio.mdx (Italian) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/it/subgraphs/developing/deploying/using-subgraph-studio.mdx index d93b3ae379e1..6d7e019d9d6f 100644 --- a/website/src/pages/it/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/it/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From aca4ceae5a3ad8c1aa60e7c383a5a44ec3c1d053 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:09 -0500 Subject: [PATCH 0915/1534] New translations using-subgraph-studio.mdx (Japanese) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ja/subgraphs/developing/deploying/using-subgraph-studio.mdx index 5d4f1ff3e537..21bb85d4fb51 100644 --- a/website/src/pages/ja/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ja/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From 9de00a200ad3dd24c6db3923b377e03c67d8964b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:09 -0500 Subject: [PATCH 0916/1534] New translations using-subgraph-studio.mdx (Korean) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ko/subgraphs/developing/deploying/using-subgraph-studio.mdx index 75934f699764..634c2700ba68 100644 --- a/website/src/pages/ko/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ko/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From 75ba44593d965ae2a212f5b07e422d3f36cb531f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:11 -0500 Subject: [PATCH 0917/1534] New translations using-subgraph-studio.mdx (Dutch) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/nl/subgraphs/developing/deploying/using-subgraph-studio.mdx index c39ad57ae8c0..04fca3fb140a 100644 --- a/website/src/pages/nl/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/nl/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From ba36b6e77c2a06a724a30bd052c778d7f940b016 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:12 -0500 Subject: [PATCH 0918/1534] New translations using-subgraph-studio.mdx (Polish) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/pl/subgraphs/developing/deploying/using-subgraph-studio.mdx index 5b2ac3d1b1a5..d2023c7b4a09 100644 --- a/website/src/pages/pl/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/pl/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From a07cbe217da7c15edfbaef3b03c33592cde508d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:13 -0500 Subject: [PATCH 0919/1534] New translations using-subgraph-studio.mdx (Portuguese) --- .../developing/deploying/using-subgraph-studio.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/pt/subgraphs/developing/deploying/using-subgraph-studio.mdx index 785cbd56a4f7..d9e9be3f83e9 100644 --- a/website/src/pages/pt/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/pt/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -63,7 +63,7 @@ Para ter apoio de Indexadores na Graph Network, os subgraphs devem: - Não deve usar quaisquer das seguintes características: - ipfs.cat & ipfs.map - Erros não-fatais - - Enxertos + - Enxerto ## Initialize Your Subgraph @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From 203bdf9606bf0ad1bb1ee1f1f840675a148e4839 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:14 -0500 Subject: [PATCH 0920/1534] New translations using-subgraph-studio.mdx (Russian) --- .../deploying/using-subgraph-studio.mdx | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ru/subgraphs/developing/deploying/using-subgraph-studio.mdx index adf9e3c18381..e1aadd279a0b 100644 --- a/website/src/pages/ru/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ru/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Развертывание с использованием Subgraph Studio +title: Deploying Using Subgraph Studio --- Узнайте, как развернуть свой субграф в Subgraph Studio. -> Примечание: При развертывании субграфа Вы отправляете его в Subgraph Studio, где сможете его протестировать. Важно помнить, что развертывание — это не то же самое, что публикация. При публикации субграфа Вы размещаете его на чейне. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Обзор Subgraph Studio @@ -53,16 +53,16 @@ npm install -g @graphprotocol/graph-cli -> Для получения дополнительной информации ознакомьтесь с разделом [Быстрый старт](/subgraphs/quick-start/). +> For additional written detail, review the [Quick Start](/subgraphs/quick-start/). ### Совместимость подграфов с сетью Graph In order to be supported by Indexers on The Graph Network, subgraphs must: -- Индексировать [поддерживаемую сеть](/supported-networks/) +- Index a [supported network](/supported-networks/) - Не должны использовать ни одну из следующих функций: - ipfs.cat & ipfs.map - - Неисправимые ошибки + - Нефатальные ошибки - Grafting ## Инициализация Вашего Субграфа @@ -114,7 +114,7 @@ graph deploy ## Публикация Вашего субграфа -Чтобы успешно опубликовать свой субграф, ознакомьтесь с [публикацией субграфа](/subgraphs/developing/publishing/publishing-a-subgraph/). +In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## Управление версиями Вашего субграфа с помощью CLI @@ -124,9 +124,9 @@ graph deploy - Если результат Вас устроит, Вы можете опубликовать новое развертывание в [Graph Explorer](https://thegraph.com/explorer). - Это действие создаст новую версию вашего субграфа, о которой Кураторы смогут начать сигнализировать, а Индексаторы — индексировать. -Вы также можете обновить метаданные субграфа без публикации новой версии. В Studio можно обновить данные субграфа (аватар, название, описание и т.д.), выбрав опцию **Обновить данные** в [Graph Explorer](https://thegraph.com/explorer). Если эта опция выбрана, будет сгенерирована транзакция в сети, которая обновит информацию о субграфе в Explorer без необходимости публиковать новую версию с новым развертыванием. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). 
If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. -> Примечание: Публикация новой версии субграфа в сети требует определенных затрат. Помимо комиссий за транзакцию, Вам нужно также оплатить часть кураторского сбора за авто-миграционный сигнал. Вы не сможете опубликовать новую версию субграфа, если на нее не был подан сигнал от Кураторов. Дополнительную информацию можно прочитать [здесь](/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Автоматическое архивирование версий подграфа From 8cd27c1a1059e78ff378f072507f4d054be581d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:15 -0500 Subject: [PATCH 0921/1534] New translations using-subgraph-studio.mdx (Swedish) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/sv/subgraphs/developing/deploying/using-subgraph-studio.mdx index b78104ccc2e2..cf6d67e5bb9d 100644 --- a/website/src/pages/sv/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/sv/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. 
> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). From b528933f5a58be536a1d73e9e55f4ff037ccf3ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:16 -0500 Subject: [PATCH 0922/1534] New translations using-subgraph-studio.mdx (Turkish) --- .../deploying/using-subgraph-studio.mdx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/tr/subgraphs/developing/deploying/using-subgraph-studio.mdx index 9ad11b4b3c47..d7aaee820f01 100644 --- a/website/src/pages/tr/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/tr/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Subgraph Studio Kullanarak Dağıtma +title: Deploying Using Subgraph Studio --- Subgraph'inizi Subgraph Studio'da dağıtma adımlarını öğrenin. -> Not: Bir subgraph'i yayına aldığınızda, onu Subgraph Studio’ya iletmiş olursunuz ve orada test edebilirsiniz. Dağıtmanın yayımlamakla aynı şey olmadığını hatırlamak önemlidir. Bir subgraph'i yayımladığınızda, onu zincir üzerinde yayımlamış olursunuz. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio'ya Genel Bakış @@ -53,17 +53,17 @@ npm install -g @graphprotocol/graph-cli -> Daha fazla yazılı detay için [Hızlı Başlangıç](/subgraphs/quick-start/) bölümünü inceleyin. +> For additional written detail, review the [Quick Start](/subgraphs/quick-start/). ### The Graph Ağı ile Subgraph Uyumluluğu Subgraph'lerin Graph Ağı Endeksleyicileri tarafından desteklenebilmesi için şu gereklilikleri karşılaması gerekir: -- [Desteklenen bir ağı](/supported-networks/) endeksliyor olmalı +- Index a [supported network](/supported-networks/) - Aşağıdaki özelliklerden hiçbirini kullanmamalı: - ipfs.cat & ipfs.map - - Kritik olmayan hatalar - - Graftlama + - Ölümcül Olmayan Hatalar + - Aşılama ## Subgraph'inizi İlklendirme @@ -114,7 +114,7 @@ Subgraph Studio’da günlükleri kontrol ederek subgraph’inizle ilgili hatala ## Subgraph’inizi Yayımlama -Subgraph’inizi başarıyla yayımlamak için [subgraph yayımlama](/subgraphs/developing/publishing/publishing-a-subgraph/) adımlarını gözden geçirin. +In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## CLI ile Subgraph’inizi Sürümleme @@ -124,9 +124,9 @@ Subgraph’inizi güncellemek isterseniz, aşağıdaki adımları izleyebilirsin - Memnun kaldığınızda, yeni dağıtımınızı [Graph Gezgini](https://thegraph.com/explorer)'nde yayımlayabilirsiniz. - Bu işlem, küratörlerin sinyal vermeye başlayabileceği ve Endeksleyicilerin endeksleyebileceği, subgraph'inizin yeni bir sürümünü oluşturur. -Ayrıca, subgraph'inizin meta verilerini yeni bir sürüm yayımlamak zorunda kalmadan güncelleyebilirsiniz. 
Studio’daki (profil resmi, isim, açıklama gibi) subgraph ayrıntılarını [Graph Gezgini](https://thegraph.com/explorer)'ndeki **Ayrıntıları Güncelle** seçeneğini işaretleyerek güncelleyebilirsiniz. Bu seçenek işaretlendiğinde, yeni bir sürüm yayımlamaya gerek kalmadan, Explorer'da subgraph ayrıntılarını güncelleyen bir blokzincir işlemi oluşturulur. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. -> Bir subgraph’in yeni bir sürümünü ağda yayımlamanın maliyetleri olduğunu unutmayın. İşlem ücretlerine ek olarak, otomatik olarak taşınan sinyalin kürasyon vergisinin bir kısmını da finanse etmeniz gerekmektedir. Küratörler subgraph'inize sinyal vermemişse subgraph'inizin yeni bir sürümünü yayımlayamazsınız. Daha fazla bilgi için [buraya](/resources/roles/curating/) göz atın. +> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## Subgraph Sürümlerinin Otomatik Arşivlenmesi From cbe1825c31143e68ad30a27c19cf51fe9f9be4e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:17 -0500 Subject: [PATCH 0923/1534] New translations using-subgraph-studio.mdx (Ukrainian) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/uk/subgraphs/developing/deploying/using-subgraph-studio.mdx index dbd0bf2c9caa..db3f790fdfe6 100644 --- a/website/src/pages/uk/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/uk/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) 
by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). From e5c148d61c0bd726c41ee4a90aa6c47b0ac19545 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:18 -0500 Subject: [PATCH 0924/1534] New translations using-subgraph-studio.mdx (Chinese Simplified) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/zh/subgraphs/developing/deploying/using-subgraph-studio.mdx index a9fac5c2fbd0..0e20b7f2a2a0 100644 --- a/website/src/pages/zh/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/zh/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). 
If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). From d1f0f3591cea7d1367866a0f6bb1dedc4c857212 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:19 -0500 Subject: [PATCH 0925/1534] New translations using-subgraph-studio.mdx (Urdu (Pakistan)) --- .../developing/deploying/using-subgraph-studio.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/ur/subgraphs/developing/deploying/using-subgraph-studio.mdx index a07e276b355f..2d16e87e3f7a 100644 --- a/website/src/pages/ur/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/ur/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -63,7 +63,7 @@ In order to be supported by Indexers on The Graph Network, subgraphs must: - درج ذیل خصوصیات میں سے کوئی بھی استعمال نہیں کرنا چاہیے: - ipfs.cat & ipfs.map - Non-fatal errors - - Grafting + - گرافٹنگ ## Initialize Your Subgraph @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. 
In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). From e88000dc2b69cfea40ecc220f4bb78d0fd431fb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:20 -0500 Subject: [PATCH 0926/1534] New translations using-subgraph-studio.mdx (Vietnamese) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/vi/subgraphs/developing/deploying/using-subgraph-studio.mdx index 462dac6569f9..98602d583746 100644 --- a/website/src/pages/vi/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/vi/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
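The patches above retranslate the same "Deploying Using Subgraph Studio" page across locales, and every version walks through the same CLI flow. As a minimal sketch of that flow — the `my-subgraph` slug and the deploy key are placeholders, and exact flags vary across `graph-cli` releases (older versions used a `--studio` flag) — it looks roughly like this:

```bash
# Authenticate the CLI with the deploy key shown on the subgraph's
# Subgraph Studio page (placeholder value here).
graph auth <DEPLOY_KEY>

# Regenerate types and compile before deploying.
graph codegen && graph build

# Push the build to Subgraph Studio for testing; the CLI prompts for a
# version label such as v0.0.1. Deploying only stages the subgraph in
# Studio — publishing it onchain is a separate step.
graph deploy my-subgraph
```
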
From 0d42e93ba8696cca3894f7c36880c2c7baee2852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:21 -0500 Subject: [PATCH 0927/1534] New translations using-subgraph-studio.mdx (Marathi) --- .../developing/deploying/using-subgraph-studio.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/mr/subgraphs/developing/deploying/using-subgraph-studio.mdx index 3849956c60b9..4769cbc3408b 100644 --- a/website/src/pages/mr/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/mr/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Deploy Using Subgraph Studio +title: Deploying Using Subgraph Studio --- Learn how to deploy your subgraph to Subgraph Studio. -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio Overview @@ -124,7 +124,7 @@ If you want to update your subgraph, you can do the following: - Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). - This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. > Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). 
From 46097ee3d77e454271488983a409e78e08030db7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:23 -0500 Subject: [PATCH 0928/1534] New translations using-subgraph-studio.mdx (Hindi) --- .../deploying/using-subgraph-studio.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/deploying/using-subgraph-studio.mdx b/website/src/pages/hi/subgraphs/developing/deploying/using-subgraph-studio.mdx index 4b39692805df..6e69b94f4c40 100644 --- a/website/src/pages/hi/subgraphs/developing/deploying/using-subgraph-studio.mdx +++ b/website/src/pages/hi/subgraphs/developing/deploying/using-subgraph-studio.mdx @@ -1,10 +1,10 @@ --- -title: Subgraph Studio का उपयोग करके डिप्लॉय करें +title: Deploying Using Subgraph Studio --- अपने subgraph को Subgraph Studio में डिप्लॉय करना सीखें। -> नोट: जब आप एक subgraph को डिप्लॉय करते हैं, तो आप इसे Subgraph Studio पर पुश करते हैं, जहाँ आप इसे टेस्ट कर सकेंगे। यह याद रखना महत्वपूर्ण है कि डिप्लॉय करना प्रकाशित करने के समान नहीं है। जब आप एक subgraph को प्रकाशित करते हैं, तो आप इसे ऑन-चेन प्रकाशित कर रहे हैं। +> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it onchain. ## Subgraph Studio का अवलोकन @@ -17,7 +17,7 @@ In Subgraph Studio,आप निम - अपना subgraph बनाएं - अपने subgraph को The Graph CLI का उपयोग करके डिप्लॉय करें - अपने 'subgraph' को 'playground' वातावरण में टेस्ट करें -- अपने स्टेजिंग में 'subgraph' को विकास क्वेरी URL का उपयोग करके एकीकृत करें +- अपने स्टेजिंग में 'subgraph' को विकास क्वेरी URL का उपयोग करके एकीकृत करें - अपने subgraph को The Graph Network पर प्रकाशित करें - अपने बिलिंग को प्रबंधित करें @@ -25,7 +25,7 @@ In Subgraph Studio,आप निम Deploy करने से पहले, आपको The Graph CLI इंस्टॉल करना होगा। -आपको The Graph CLI का उपयोग करने के लिए Node.js(https://nodejs.org/) और आपकी पसंद का पैकेज मैनेजर (npm, yarn या pnpm) स्थापित होना चाहिए। सबसे हालिया (https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI संस्करण की जांच करें। +आपको The Graph CLI का उपयोग करने के लिए Node.js(https://nodejs.org/) और आपकी पसंद का पैकेज मैनेजर (npm, yarn या pnpm) स्थापित होना चाहिए। सबसे हालिया (https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI संस्करण की जांच करें। ### इंस्टॉल करें 'yarn' के साथ @@ -39,7 +39,7 @@ yarn global add @graphprotocol/graph-cli npm install -g @graphprotocol/graph-cli ``` -## शुरू हो जाओ +## शुरू करिये 1. खोलें [Subgraph Studio](https://thegraph.com/studio/). 2. अपने वॉलेट से साइन इन करें। @@ -53,13 +53,13 @@ npm install -g @graphprotocol/graph-cli -> अतिरिक्त लिखित विवरण के लिए, Quick Start(/subgraphs/quick-start/) की समीक्षा करें। +> For additional written detail, review the [Quick Start](/subgraphs/quick-start/). 
### ग्राफ नेटवर्क के साथ सबग्राफ अनुकूलता In order to be supported by Indexers on The Graph Network, subgraphs must: -- एक supported network(/supported-networks/) को अनुक्रमित करें +- Index a [supported network](/supported-networks/) - निम्नलिखित सुविधाओं में से किसी का उपयोग नहीं करना चाहिए: - ipfs.cat & ipfs.map - गैर-घातक त्रुटियाँ @@ -114,7 +114,7 @@ Subgraph Studio का उपयोग करके डैशबोर्ड प ## अपने Subgraph को प्रकाशित करें -अपने subgraph को सफलतापूर्वक प्रकाशित करने के लिए, publishing a subgraph(/subgraphs/developing/publishing/publishing-a-subgraph/). की समीक्षा करें। +In order to publish your subgraph successfully, review [publishing a subgraph](/subgraphs/developing/publishing/publishing-a-subgraph/). ## अपने Subgraph को CLI के साथ संस्करण बनाना @@ -124,9 +124,9 @@ Subgraph Studio का उपयोग करके डैशबोर्ड प - एक बार जब आप इससे संतुष्ट हो जाएं, तो आप अपने नए डिप्लॉयमेंट को Graph Explorer(https://thegraph.com/explorer). पर प्रकाशित कर सकते हैं। - यह क्रिया आपके नए संस्करण का निर्माण करेगी जिसे Curators सिग्नल करना शुरू कर सकते हैं और Indexers अनुक्रमित कर सकते हैं। -आप अपने subgraph के मेटाडेटा को बिना नई संस्करण प्रकाशित किए भी अपडेट कर सकते हैं। आप Studio में अपने subgraph विवरण (प्रोफ़ाइल चित्र, नाम, विवरण, आदि के अंतर्गत) को [Graph Explorer](https://thegraph.com/explorer). में Update Details नामक विकल्प को चेक करके अपडेट कर सकते हैं। यदि यह चेक किया गया है, तो एक ऑन-चेन लेनदेन उत्पन्न होगा जो Explorer में subgraph विवरण को अपडेट करेगा, बिना नई तैनाती के साथ नया संस्करण प्रकाशित किए। +You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an onchain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. -> Note: नेटवर्क पर नए संस्करण को प्रकाशित करने से संबंधित लागतें हैं। लेनदेन शुल्क के अलावा, आपको ऑटो-माइग्रेटिंग सिग्नल पर क्यूरेशन टैक्स का एक भाग भी फंड करना होगा। यदि क्यूरेटरों ने इस पर सिग्नल नहीं किया है, तो आप अपने सबग्राफ का नया संस्करण प्रकाशित नहीं कर सकते। अधिक जानकारी के लिए, कृपया और पढ़ें यहाँ(/resources/roles/curating/). +> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/resources/roles/curating/). ## सबग्राफ संस्करणों का स्वचालित संग्रह From 42ef092ee7470bad9db995bb66e2d20f11117e68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:24 -0500 Subject: [PATCH 0929/1534] New translations developer-faq.mdx (Romanian) --- website/src/pages/ro/subgraphs/developing/developer-faq.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/developing/developer-faq.mdx b/website/src/pages/ro/subgraphs/developing/developer-faq.mdx index 45b99b086165..8dbe6d23ad39 100644 --- a/website/src/pages/ro/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ro/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: FAQs al Developerilor +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. 
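Several of the translated passages above describe the versioning loop: deploy a new version first, test it in Studio, and only then publish it. A hedged sketch of that loop, assuming the subgraph was already initialized and using a hypothetical slug and version label:

```bash
# Rebuild after changing the manifest, schema, or mappings.
graph codegen && graph build

# Deploy the same slug under a bumped version label; the currently
# published version keeps serving queries until the new one is
# published and has finished syncing.
graph deploy my-subgraph --version-label v0.0.2
```
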
From c13841ab836b36a8fc0be81f827711fb24232ca9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:25 -0500 Subject: [PATCH 0930/1534] New translations developer-faq.mdx (French) --- .../fr/subgraphs/developing/developer-faq.mdx | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/developer-faq.mdx b/website/src/pages/fr/subgraphs/developing/developer-faq.mdx index 5052da626232..e2bb16ce90af 100644 --- a/website/src/pages/fr/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/fr/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: FAQs pour les développeurs +title: FAQ pour les développeurs +sidebarTitle: FAQ --- Cette page résume certaines des questions les plus courantes pour les développeurs construisant sur The Graph. @@ -12,7 +13,7 @@ Un subgraph est une API personnalisée construite sur des données blockchain. L ### 2. Quelle est la première étape pour créer un subgraph ? -Pour créer avec succès un subgraph, vous devrez installer Graph CLI. Consultez le [guide de démarrage rapide](/subgraphs/quick-start/) pour commencer. Pour des informations détaillées, voir [Création d'un subgraph](/developing/creating-a-subgraph/). +Pour créer un subgraph avec succès, vous devez installer Graph CLI. Consultez le [Démarrage rapide](/subgraphs/quick-start/) pour commencer. Pour des informations détaillées, consultez [Création d'un subgraph](/developing/creating-a-subgraph/). ### 3. Suis-je toujours en mesure de créer un subgraph si mes smart contracts n'ont pas d'événements ? @@ -34,9 +35,9 @@ Vous devez redéployer le subgraph, mais si l'ID de subgraph (hachage IPFS) ne c ### 7. Comment puis-je appeler une fonction d'un contrat ou accéder à une variable d'état publique depuis mes mappages de subgraph ? -Consultez `Accès à l'état du contrat intelligent` dans la section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). +Jetez un œil à l’état `Accès au contrat intelligent` dans la section [API AssemblyScript](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Puis-je importer `ethers.js` ou d'autres bibliothèques JS dans mes mappages de subgraph ? +### 8. Puis-je importer `ethers.js` ou d'autres bibliothèques JS dans mes mappages de subgraphs ? Actuellement non, car les mappages sont écrits en AssemblyScript. @@ -50,11 +51,11 @@ Dans un subgraph, les événements sont toujours traités dans l'ordre dans lequ Les modèles vous permettent de créer rapidement des sources de données , pendant que votre subgraph est en cours d'indexation. Votre contrat peut générer de nouveaux contrats à mesure que les gens interagissent avec lui. Étant donné que vous connaissez la structure de ces contrats (ABI, événements, etc.) à l'avance, vous pouvez définir comment vous souhaitez les indexer dans un modèle. Lorsqu'ils sont générés, votre subgraph créera une source de données dynamique en fournissant l'adresse du contrat. -Consultez la section "Instanciation d'un modèle de source de données" sur : [Modèles de source de données](/developing/creating-a-subgraph/#data-source-templates). +Consultez la section "Instanciation d'un modèle de source de données" sur : [Modèles de sources de données](/developing/creating-a-subgraph/#data-source-templates). -### 11. Est-il possible de configurer un subgraph en utilisant `graph init` de `graph-cli` avec deux contrats ? 
Ou devrais-je ajouter manuellement une autre source de données dans `subgraph.yaml` après avoir exécuté `graph init` ? +### 11. Est-il possible de configurer un subgraph en utilisant `graph init` à partir de `graph-cli` avec deux contrats ? Ou dois-je ajouter manuellement une autre source de données dans `subgraph.yaml` après avoir lancé `graph init` ? -Oui. Avec la commande `graph init` elle-même, vous pouvez ajouter plusieurs sources de données en entrant les contrats l'un après l'autre. +Oui. Dans la commande `graph init` elle-même, vous pouvez ajouter plusieurs sources de données en entrant des contrats l'un après l'autre. Vous pouvez également utiliser la commande `graph add` pour ajouter une nouvelle source de données. @@ -76,21 +77,21 @@ docker pull graphprotocol/graph-node:dernier ### 14. Quelle est la méthode recommandée pour créer des Ids "autogénérés" pour une entité pendant la gestion des événements ? -Si une seule entité est créée lors de l'événement et s'il n'y a rien de mieux disponible,alors le hachage de transaction + index de journal serait unique. Vous pouvez les masquer en les convertissant en octets, puis en les redirigeant vers `crypto.keccak256`, mais cela ne le rendra pas plus unique. +Si une seule entité est créée pendant l'événement et s'il n'y a rien de mieux disponible, alors le hash de la transaction + l'index du journal seront uniques. Vous pouvez les obscurcir en les convertissant en Bytes et en les faisant passer par `crypto.keccak256`, mais cela ne les rendra pas plus uniques. ### 15. Puis-je supprimer mon subgraph ? -Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [transfer](/subgraphs/developing/managing/transferring-a-subgraph/) your subgraph. +Oui, vous pouvez [supprimer](/subgraphs/developing/managing/deleting-a-subgraph/) et [transférer](/subgraphs/developing/managing/transferring-a-subgraph/) votre subgraph. ## Relatif au Réseau ### 16. Quels réseaux sont supportés par The Graph? -Vous pouvez trouver la liste des réseaux supportés [ici](/supported-networks/). +Vous pouvez trouver la liste des réseaux pris en charge [ici](/supported-networks/). ### 17. Est-il possible de faire la différence entre les réseaux (mainnet, Sepolia, local) dans les gestionnaires d'événements? -Oui. Vous pouvez le faire en important `graph-ts` comme dans l'exemple ci-dessous : +Oui, vous pouvez le faire en important `graph-ts` comme dans l'exemple ci-dessous : ```javascript importez { dataSource } de '@graphprotocol/graph-ts' @@ -103,15 +104,15 @@ dataSource.address() Oui. Sepolia prend en charge les gestionnaires de blocs, les gestionnaires d'appels et les gestionnaires d'événements. Il convient de noter que les gestionnaires d'événements sont beaucoup plus performants que les deux autres gestionnaires, et ils sont pris en charge sur tous les réseaux compatibles EVM. -## En rapport avec l'indexation & les requêtes +## En rapport avec L'Indexation & L'interrogation ### 19. Est-il possible de spécifier à partir de quel bloc commencer l'indexation? -Oui. `dataSources.source.startBlock` dans le fichier `subgraph.yaml` spécifie le numéro du bloc à partir duquel la Source de donnée commence l'indexation. Dans la plupart des cas, nous suggérons d'utiliser le bloc où le contrat a été créé : [Blocs de départ](/developing/creating-a-subgraph/#start-blocks) +Oui. `dataSources.source.startBlock` dans le fichier `subgraph.yaml` spécifie le numéro du bloc à partir duquel la source de données commence l'indexation. 
Dans la plupart des cas, nous suggérons d'utiliser le bloc où le contrat a été créé : [Start blocks](/developing/creating-a-subgraph/#start-blocks) ### 20. Quels sont quelques conseils pour augmenter les performances d'indexation? Mon subgraph prend beaucoup de temps à se synchroniser -Oui, vous devriez consulter la fonctionnalité optionnelle de bloc de départ pour commencer l'indexation à partir du bloc où le contrat a été déployé : [Blocs de départ](/developing/creating-a-subgraph/#start-blocks) +Oui, vous devriez jeter un coup d'œil à la fonctionnalité optionnelle de bloc de démarrage pour commencer l'indexation à partir du bloc où le contrat a été déployé : [Start blocks](/developing/creating-a-subgraph/#start-blocks) ### 21. Existe-t-il un moyen d'interroger directement le subgraph pour déterminer le dernier numéro de bloc qu'il a indexé? @@ -126,12 +127,12 @@ curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"or Par défaut, les réponses aux requêtes sont limitées à 100 éléments par collection. Si vous voulez en recevoir plus, vous pouvez aller jusqu'à 1000 éléments par collection et au-delà, vous pouvez paginer avec : ```graphql -quelquesCollection(first: 1000, skip: ) { ... } +someCollection(first: 1000, skip: ) { ... } ``` -### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? +### 23. Si mon application décentralisée (dapp) utilise The Graph pour effectuer des requêtes, dois-je écrire ma clé API directement dans le code du frontend ? Et si nous payons les frais de requête pour les utilisateurs – des utilisateurs malveillants pourraient-ils faire augmenter considérablement nos frais de requête ? -Actuellement, l'approche recommandée pour un dapp est d'ajouter la clé au frontend et de l'exposer aux utilisateurs finaux. Cela dit, vous pouvez limiter cette clé à un nom d'hôte, comme _yourdapp.io_ et subgraph. La passerelle est actuellement gérée par Edge & Node. Une partie de la responsabilité d'une passerelle est de surveiller les comportements abusifs et de bloquer le trafic des clients malveillants. +Actuellement, l'approche recommandée pour une dapp est d'ajouter la clé au frontend et de l'exposer aux utilisateurs finaux. Cela dit, vous pouvez limiter cette clé à un nom d'hôte, comme _yourdapp.io_ et subgraph. La passerelle est actuellement gérée par Edge & Node. Une partie de la responsabilité d'une passerelle est de surveiller les comportements abusifs et de bloquer le trafic des clients malveillants. ## Divers @@ -142,6 +143,6 @@ La fédération n'est pas encore supportée. Pour le moment, vous pouvez utilise ### 25. Je veux contribuer ou ajouter un problème GitHub. Où puis-je trouver les dépôts open source? 
- [graph-node](https://github.com/graphprotocol/graph-node) -- [l'outil de graph](https://github.com/graphprotocol/graph-tooling) +- [graph-tooling](https://github.com/graphprotocol/graph-tooling) - [graph-docs](https://github.com/graphprotocol/docs) - [graph-client](https://github.com/graphprotocol/graph-client) From fe28d3d8d48ca30b6620e07137b7255f1b377aac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:26 -0500 Subject: [PATCH 0931/1534] New translations developer-faq.mdx (Spanish) --- .../pages/es/subgraphs/developing/developer-faq.mdx | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/developer-faq.mdx b/website/src/pages/es/subgraphs/developing/developer-faq.mdx index 370adeca6839..0a3bad37fd09 100644 --- a/website/src/pages/es/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/es/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Preguntas Frecuentes de los Desarrolladores +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -50,7 +51,7 @@ Dentro de un subgrafo, los eventos se procesan siempre en el orden en que aparec Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. -Consulta la sección "Instantiating a data source template" en: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). ### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -Si sólo se crea una entidad durante el evento y si no hay nada mejor disponible, entonces el hash de la transacción + el índice del registro serían únicos. Puedes ofuscar esto convirtiendo eso en Bytes y luego pasándolo por `crypto.keccak256` pero esto no lo hará más único. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. What networks are supported by The Graph? -Puedes encontrar la lista de redes admitidas [aquí](/supported-networks/). +You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -Sí. Puedes hacerlo importando `graph-ts` como en el ejemplo siguiente: +Yes. 
You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' From 134a9b9dad0aa37847774dacc7ecf029210d763b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:27 -0500 Subject: [PATCH 0932/1534] New translations developer-faq.mdx (Arabic) --- .../src/pages/ar/subgraphs/developing/developer-faq.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/developer-faq.mdx b/website/src/pages/ar/subgraphs/developing/developer-faq.mdx index c740d2680695..f0e9ba0cd865 100644 --- a/website/src/pages/ar/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ar/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: الأسئلة الشائعة للمطورين +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -إذا تم إنشاء كيان واحد فقط أثناء الحدث ولم يكن هناك أي شيء متاح بشكل أفضل ، فسيكون hash الإجراء + فهرس السجل فريدا. يمكنك إبهامها عن طريق تحويلها إلى Bytes ثم تمريرها عبر `crypto.keccak256` ولكن هذا لن يجعلها فريدة من نوعها. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -90,7 +91,7 @@ You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -نعم. يمكنك القيام بذلك عن طريق استيراد `graph-ts` كما في المثال أدناه: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript 'import { dataSource } from '@graphprotocol/graph-ts From 47e63cac6dba4e79807e7bf8b135712258294acb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:28 -0500 Subject: [PATCH 0933/1534] New translations developer-faq.mdx (Czech) --- .../pages/cs/subgraphs/developing/developer-faq.mdx | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/developer-faq.mdx b/website/src/pages/cs/subgraphs/developing/developer-faq.mdx index 095bef559f01..e07a7f06fb48 100644 --- a/website/src/pages/cs/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/cs/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: FAQs pro vývojáře +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -50,7 +51,7 @@ V rámci podgrafu se události zpracovávají vždy v pořadí, v jakém se obje Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. -Podívejte se do části "Instancování šablony zdroje dat" na: [Šablony datových zdrojů](/developing/creating-a-subgraph/#data-source-templates). 
+Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). ### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -Pokud je během události vytvořena pouze jedna entita a pokud není k dispozici nic lepšího, pak by hash transakce + index protokolu byly jedinečné. Můžete je obfuskovat tak, že je převedete na bajty a pak je proženete přes `crypto.keccak256`, ale tím se jejich jedinečnost nezvýší. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. What networks are supported by The Graph? -Seznam podporovaných sítí najdete [zde](/supported-networks/). +You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -Ano, můžete to provést importováním `graph-ts` podle níže uvedeného příkladu: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' From f53d04c23bbc9b0a9273c5e8d6c9efbe965e3f5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:29 -0500 Subject: [PATCH 0934/1534] New translations developer-faq.mdx (German) --- website/src/pages/de/subgraphs/developing/developer-faq.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/de/subgraphs/developing/developer-faq.mdx b/website/src/pages/de/subgraphs/developing/developer-faq.mdx index 8a457a3db5c3..8dbe6d23ad39 100644 --- a/website/src/pages/de/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/de/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Developer FAQs +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. From 4ea8ee723f384b215bd50c1b047e091e35ca3ec5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:30 -0500 Subject: [PATCH 0935/1534] New translations developer-faq.mdx (Italian) --- website/src/pages/it/subgraphs/developing/developer-faq.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/developing/developer-faq.mdx b/website/src/pages/it/subgraphs/developing/developer-faq.mdx index 8a457a3db5c3..8dbe6d23ad39 100644 --- a/website/src/pages/it/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/it/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Developer FAQs +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. 
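The developer-FAQ retranslations above keep two recurring snippets intact: the 1000-items-per-page pagination pattern and the index-node status query. Putting both into runnable form — the query URL and the `org/subgraph` name below are placeholders:

```bash
# Page through a collection 1,000 entities at a time (FAQ #22);
# skip advances by 1000 per page.
curl -X POST -H 'Content-Type: application/json' \
  -d '{"query": "{ someCollection(first: 1000, skip: 1000) { id } }"}' \
  <SUBGRAPH_QUERY_URL>

# Ask the index-node endpoint for the latest block a deployment has
# indexed (FAQ #21).
curl -X POST -H 'Content-Type: application/json' \
  -d '{"query": "{ indexingStatusForCurrentVersion(subgraphName: \"org/subgraph\") { chains { latestBlock { number } } } }"}' \
  https://api.thegraph.com/index-node/graphql
```
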
From f07103394b1aead2dadcccb35c5b9e4513d52110 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:31 -0500 Subject: [PATCH 0936/1534] New translations developer-faq.mdx (Japanese) --- .../pages/ja/subgraphs/developing/developer-faq.mdx | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/developer-faq.mdx b/website/src/pages/ja/subgraphs/developing/developer-faq.mdx index d241a0b43748..9744d7d9a53d 100644 --- a/website/src/pages/ja/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ja/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: 開発者 FAQ +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -50,7 +51,7 @@ One possible alternative solution to this is to store raw data in entities and p Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. -データソース・テンプレートのインスタンス化」のセクションをご覧ください: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). ### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -もし、イベント中に 1 つのエンティティしか作成されず、他に利用できるものがなければ、トランザクションハッシュ+ログインデックスがユニークになります。Bytes に変換して`crypto.keccak256`に通すことで難読化することができますが、これでは一意性は高まりません。 +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. What networks are supported by The Graph? -対応ネットワークの一覧は[こちら](/supported-networks/)で確認できます。 +You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -はい、以下の例のように`graph-ts`をインポートすることで可能です。 +Yes. 
You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' From 2c650da06b171a4b7013b2454542ac12664feecc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:32 -0500 Subject: [PATCH 0937/1534] New translations developer-faq.mdx (Korean) --- website/src/pages/ko/subgraphs/developing/developer-faq.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/developing/developer-faq.mdx b/website/src/pages/ko/subgraphs/developing/developer-faq.mdx index 8a457a3db5c3..8dbe6d23ad39 100644 --- a/website/src/pages/ko/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ko/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Developer FAQs +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. From 2e7e9c5c4ffb593663f60e015ee59c8fa98a2b2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:33 -0500 Subject: [PATCH 0938/1534] New translations developer-faq.mdx (Dutch) --- website/src/pages/nl/subgraphs/developing/developer-faq.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/developing/developer-faq.mdx b/website/src/pages/nl/subgraphs/developing/developer-faq.mdx index cb3410cc5f5f..8dbe6d23ad39 100644 --- a/website/src/pages/nl/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/nl/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Ontwikkelaar FAQs +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. From f6d4e96478dec9ed5bc9beea0aac0a7bdeef2f57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:34 -0500 Subject: [PATCH 0939/1534] New translations developer-faq.mdx (Polish) --- website/src/pages/pl/subgraphs/developing/developer-faq.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/developing/developer-faq.mdx b/website/src/pages/pl/subgraphs/developing/developer-faq.mdx index 7997bcf012d2..8dbe6d23ad39 100644 --- a/website/src/pages/pl/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/pl/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: FAQs dla developerów +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. From 7b0b6569d28d91fcbdaec10f14b45465fa853936 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:36 -0500 Subject: [PATCH 0940/1534] New translations developer-faq.mdx (Portuguese) --- .../pages/pt/subgraphs/developing/developer-faq.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/developer-faq.mdx b/website/src/pages/pt/subgraphs/developing/developer-faq.mdx index 36d4325f4fd2..94f963a2fa3a 100644 --- a/website/src/pages/pt/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/pt/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Perguntas Frequentes dos Programadores +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. 
@@ -34,7 +35,7 @@ Deve relançar o subgraph, mas se a ID do subgraph (hash IPFS) não mudar, ele n ### 7. How do I call a contract function or access a public state variable from my subgraph mappings? -Confira o estado `Access to smart contract` dentro da seção [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). +Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). ### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? @@ -50,7 +51,7 @@ Dentro de um subgraph, os eventos são sempre processados na ordem em que aparec Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. -Confira a seção "como instanciar um modelo de fontes de dados" em: [Modelos de fontes de dados](/developing/creating-a-subgraph/#data-source-templates). +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). ### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -Se só uma entidade for criada durante o evento e não houver nada melhor disponível, então o hash da transação + o index do log será original. Podes ofuscá-los ao converter aquilo em Bytes e então o colocar pelo `crypto.keccak256`, mas isto não o fará mais original. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. What networks are supported by The Graph? -Veja a lista das redes apoiadas [aqui](/supported-networks/). +You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -Sim. Isto é possível ao importar o `graph-ts` como no exemplo abaixo: +Yes. 
You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' From 62401b632b00bc1c42657dc8ab6261ac66f87045 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:37 -0500 Subject: [PATCH 0941/1534] New translations developer-faq.mdx (Russian) --- .../ru/subgraphs/developing/developer-faq.mdx | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/developer-faq.mdx b/website/src/pages/ru/subgraphs/developing/developer-faq.mdx index 109268e58b12..4c5aa00bf9cf 100644 --- a/website/src/pages/ru/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ru/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Часто задаваемы вопросы для разработчиков +title: Developer FAQ +sidebarTitle: FAQ --- На этой странице собраны некоторые из наиболее частых вопросов для разработчиков, использующих The Graph. @@ -12,7 +13,7 @@ title: Часто задаваемы вопросы для разработчи ### 2. Каков первый шаг в создании субграфа? -Для успешного создания субграфа Вам потребуется установить The Graph CLI. Перед началом работы, ознакомьтесь с разделом [Быстрый старт](/subgraphs/quick-start/). Подробную информацию см. в разделе [Создание субграфа](/developing/creating-a-subgraph/). +To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). ### 3. Могу ли я создать субграф, если в моих смарт-контрактах нет событий? @@ -34,9 +35,9 @@ title: Часто задаваемы вопросы для разработчи ### 7. Как вызвать контрактную функцию или получить доступ к публичной переменной состояния из моих мэппингов субграфа? -Просмотрите положение `Доступ к смарт-контракту` в разделе [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). +Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Могу ли я импортировать `ethers.js` или другие библиотеки JS в мои мэппинги субграфов? +### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? В настоящее время нет, так как мэппинги написаны на языке AssemblyScript. @@ -50,13 +51,13 @@ title: Часто задаваемы вопросы для разработчи Шаблоны позволяют Вам быстро создавать источники данных, пока Ваш субграф индексируется. Ваш контракт может создавать новые контракты по мере того, как люди будут с ним взаимодействовать. Поскольку форма этих контрактов (ABI, события и т. д.) известна заранее, Вы сможете определить, как Вы хотите индексировать их в шаблоне. Когда они будут сгенерированы, Ваш субграф создаст динамический источник данных, предоставив адрес контракта. -Ознакомьтесь с параграфом "Создание экземпляра шаблона источника данных" в разделе: [Шаблоны источников данных](/developing/creating-a-subgraph/#data-source-templates). +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. Можно ли настроить субграф с помощью `graph init` из `graph-cli` с двумя контрактами? Или мне следует вручную добавить другой источник данных в `subgraph.yaml` после запуска `graph init`? +### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? 
Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? -Да. В самой команде `graph init` Вы можете добавлять несколько источников данных, вводя контракты один за другим. +Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. -Вы также можете использовать команду `graph add` для добавления нового источника данных. +You can also use `graph add` command to add a new dataSource. ### 12. В каком порядке вызываются обработчики событий, блоков и вызовов для источника данных? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. Каков рекомендуемый способ создания "автоматически сгенерированных" идентификаторов для объекта при обработке событий? -Если во время события создается только один объект и нет ничего лучшего, то индекс хэша транзакции и журнала будет уникальным. Вы можете замаскировать их, преобразовав в байты, а затем, пропустив через `crypto.keccak256`, но это не сделает их более уникальными. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Могу ли я удалить свой субграф? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. Какие сети поддерживает The Graph? -Вы можете найти список поддерживаемых сетей [здесь](/supported-networks/). +You can find the list of the supported networks [here](/supported-networks/). ### 17. Можно ли различать сети (майннет, Sepolia, локальную) внутри обработчиков событий? -Да. Вы можете это сделать, импортировав `graph-ts`, как показано в примере ниже: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' @@ -103,15 +104,15 @@ dataSource.address() Да. Sepolia поддерживает обработчики блоков, обработчики вызовов и обработчики событий. Следует отметить, что обработчики событий намного более эффективны, чем два других обработчика, и они поддерживаются в каждой сети, совместимой с EVM. -## Вопросы, связанные с Индексированием & Запросами +## Indexing & Querying Related ### 19. Можно ли указать, с какого блока следует начинать индексирование? -Да. `dataSources.source.startBlock` в файле `subgraph.yaml` указывает номер блока, с которого источник данных начинает индексирование. В большинстве случаев мы предлагаем использовать блок, в котором создавался контракт: [Стартовые блоки](/developing/creating-a-subgraph/#start-blocks) +Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) ### 20. Есть ли какие-либо советы по увеличению производительности индексирования? Синхронизация моего субграфа занимает очень много времени -Да, Вам следует обратить внимание на дополнительную функцию стартового блока, чтобы начать индексирование с блока, в котором был развернут контракт: [Стартовые блоки](/developing/creating-a-subgraph/#start-blocks) +Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) ### 21. 
Есть ли способ напрямую запросить субграф, чтобы определить номер последнего проиндексированного блока? @@ -131,7 +132,7 @@ someCollection(first: 1000, skip: ) { ... } ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -В настоящее время рекомендуемым подходом для децентрализованного приложения является добавление ключа во внешний интерфейс и предоставление его конечным пользователям. При этом Вы можете ограничить этот ключ именем хоста, например _yourdapp.io_ и субграфом. Шлюз в настоящее время находится в управлении Edge & Node. Частью ответственности шлюза является отслеживание злоупотреблений и блокировка трафика от вредоносных клиентов. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. ## Прочее From adae2f6b74b01a23a743a2a1431933c2e020e27c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:38 -0500 Subject: [PATCH 0942/1534] New translations developer-faq.mdx (Swedish) --- .../pages/sv/subgraphs/developing/developer-faq.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/developer-faq.mdx b/website/src/pages/sv/subgraphs/developing/developer-faq.mdx index 5ad177f33946..22b7ebed599f 100644 --- a/website/src/pages/sv/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/sv/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Vanliga frågor för utvecklare +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -50,7 +51,7 @@ Inom en subgraf behandlas händelser alltid i den ordning de visas i blocken, oa Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. -Kolla in avsnittet "Instansiera en mall för datakälla" på: [Mallar för datakällor](/developing/creating-a-subgraph/#data-source-templates). +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). ### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -Om endast en entitet skapas under händelsen och om inget bättre är tillgängligt, skulle transaktionshashen + loggindexet vara unikt. Du kan förvränga dessa genom att konvertera dem till bytes och sedan skicka dem genom `crypto.keccak256`, men detta kommer inte att göra dem mer unika. 
+If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -86,14 +87,14 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. What networks are supported by The Graph? -Du kan hitta listan över de stödda nätverken [här](/supported-networks/). +You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -Ja, du kan göra detta genom att importera `graph-ts` enligt exemplet nedan: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript -import { dataSource } from '@graphprotocol/graph-ts' +import { dataSource } from "@graphprotocol/graph-ts" dataSource.network() dataSource.address() From 3c25622decae0926a7256428b19d341e31d7ae85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:39 -0500 Subject: [PATCH 0943/1534] New translations developer-faq.mdx (Turkish) --- .../tr/subgraphs/developing/developer-faq.mdx | 33 ++++++++++--------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/developer-faq.mdx b/website/src/pages/tr/subgraphs/developing/developer-faq.mdx index ea8f513353a1..d464a0058dfb 100644 --- a/website/src/pages/tr/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/tr/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Geliştirici SSS +title: Developer FAQ +sidebarTitle: FAQ --- Bu sayfa, The Graph üzerinde geliştirme yapan geliştiricilerin sunduğu en yaygın soruların bazılarını özetlemektedir. @@ -12,7 +13,7 @@ Bir subgraph, blokzinciri verilerine dayalı olarak oluşturulmuş özel yapım ### 2. Subgraph oluşturmanın ilk adımı nedir? -Başarılı bir şekilde subgraph oluşturmak için The Graph CLI’yi kurmanız gerekir. Başlamak için [Hızlı Başlangıç](/subgraphs/quick-start/) bölümüne göz atın. Ayrıntılı bilgi için [Subgraph Oluşturma](/developing/creating-a-subgraph/) bölümünü inceleyin. +To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). ### 3. Akıllı sözleşmelerim olay içermiyorsa yine de subgraph oluşturabilir miyim? @@ -34,9 +35,9 @@ Subgraph’i yeniden dağıtmanız gerekir ancak subgraph ID'si (IPFS hash’i) ### 7. Subgraph eşlemelerinden sözleşme fonksiyonunu nasıl çağırabilir veya bir genel durum değişkenine nasıl erişebilirim? -[AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state) bölümündeki `Akıllı sözleşmeye erişim` kısmına göz atın. +Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. Subgraph eşleyicilerinde `ethers.js` veya diğer JS kütüphanelerini kullanabilir miyim? +### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? Eşleyiciler AssemblyScript ile yazıldığından dolayı şu anda mümkün değil. 
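
The `Access to smart contract` pointer in question 7 above maps to a small amount of `graph-ts` code. A minimal sketch, assuming a generated contract class named `Token` with a public `symbol()` getter — both names are illustrative assumptions, not part of the patched docs:

```javascript
import { ethereum } from '@graphprotocol/graph-ts'
import { Token } from '../generated/Token/Token' // hypothetical class generated from the contract ABI

export function handleSomeEvent(event: ethereum.Event): void {
  // Bind the generated contract class to the address of the emitting contract
  let contract = Token.bind(event.address)

  // `try_` variants return a result wrapper instead of aborting the mapping
  // when the call reverts
  let symbolResult = contract.try_symbol()
  if (!symbolResult.reverted) {
    // symbolResult.value holds the string returned by symbol()
  }
}
```
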
@@ -50,13 +51,13 @@ Bir subgraph içindeki olaylar, birden fazla sözleşme üzerinde olup olmamaya Şablonlar, subgraph’iniz endeksleme yaparken veri kaynaklarını hızlıca oluşturmanızı sağlar. Sözleşmeniz, kullanıcılar etkileşime girdikçe yeni sözleşmeler yaratabilir. Bu sözleşmelerin yapısını (ABI, olaylar vb.) önceden bildiğinizden, onları nasıl endekslemek istediğinizi bir şablonda tanımlayabilirsiniz. Yeni sözleşmeler oluşturulduğunda, subgraph’iniz sözleşme adresini tespit ederek dinamik bir veri kaynağı oluşturacaktır. -"Bir Veri Kaynağı Şablonunu Başlatma" bölümüne göz atın: [Veri Kaynağı Şablonları](/developing/creating-a-subgraph/#data-source-templates). +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. `graph init` komutunu `graph-cli` ile kullanarak iki sözleşme içeren bir subgraph kurmak mümkün müdür? Yoksa `graph init` komutunu çalıştırdıktan sonra `subgraph.yaml` dosyasına elle bir başka dataSource eklemem mi gerekir? +### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? -Evet, mümkündür. `graph init` komutunu kullanırken, sözleşmeleri art arda girerek birden fazla dataSource ekleyebilirsiniz. +Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. -Ayrıca, yeni bir dataSource eklemek için `graph add` komutunu da kullanabilirsiniz. +You can also use `graph add` command to add a new dataSource. ### 12. Bir veri kaynağı için olay, blok ve çağrı işleyicileri hangi sırayla tetiklenir? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. Olayları işlerken bir varlık için "otomatik oluşturulan" id'leri yaratmanın önerilen yolu nedir? -Eğer olay sırasında yalnızca bir varlık oluşturuluyorsa ve daha iyi bir seçenek yoksa, işlem hash'i + log indisi benzersiz bir id olur. Bunları Bytes'a dönüştürüp `crypto.keccak256` üzerinden geçirerek gizleyebilirsiniz ancak bu işlemi daha benzersiz hale getirmez. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Subgraph'imi silebilir miyim? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. The Graph hangi ağları desteklemektedir? -Desteklenen ağların listesini [burada](/supported-networks/) bulabilirsiniz. +You can find the list of the supported networks [here](/supported-networks/). ### 17. Olay işleyicileri içerisinde ağlar (mainnet, Sepolia, yerel) arasında ayrım yapmak mümkün müdür? -Evet, mümkündür. Aşağıdaki örnekteki gibi `graph-ts` kullanarak yapabilirsiniz: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' @@ -103,15 +104,15 @@ dataSource.address() Evet. Sepolia, blok işleyicileri, çağrı işleyicileri ve olay işleyicilerini destekler. Olay işleyicilerinin diğer iki işleyiciye göre çok daha yüksek performansa sahip olduğu ve tüm EVM uyumlu ağlarda desteklendiği unutulmamalıdır. -## Endeksleme ve Sorgulama ile İlgili Sorular +## Indexing & Querying Related ### 19. Endekslemeye hangi bloktan başlanacağını belirtmek mümkün müdür? -Evet. 
`subgraph.yaml` dosyasındaki `dataSources.source.startBlock`, veri kaynağının endekslemeye başladığı blok numarasını belirtir. Çoğu durumda, sözleşmenin oluşturulduğu bloğun kullanılmasını öneriyoruz. Daha fazla bilgi için: [Başlangıç blokları](/developing/creating-a-subgraph/#start-blocks) +Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) ### 20. Endeksleme performansını artırmak için ipuçları var mı? Subgraph'imin senkronize edilmesi çok uzun zaman alıyor -Sözleşmenin dağıtıldığı bloktan itibaren endeksleme yapmak için opsiyonel başlangıç bloğu özelliğine göz atabilirsiniz: [Başlangıç blokları](/developing/creating-a-subgraph/#start-blocks) +Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) ### 21. Subgraph üzerinde doğrudan sorgulama yaparak endekslenmiş en son blok numarasını öğrenmenin bir yolu var mı? @@ -126,12 +127,12 @@ curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"or Varsayılan olarak, sorgu yanıtları koleksiyon başına 100 ögeyle sınırlıdır. Daha fazlasını almak istiyorsanız koleksiyon başına 1000 ögeye kadar çıkabilirsiniz. Daha da fazlası için şu şekilde sayfalama yapabilirsiniz: ```graphql -someCollection(first: 1000, skip: ) { ... } +someCollection(first: 1000, skip: ) { ... } ``` ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -Şu anda, bir merkeziyetsiz uygulama için önerilen yaklaşım, anahtarı önyüze eklemek ve son kullanıcılara göstermektir. Bununla birlikte, bu anahtarı _yourdapp.io_ gibi bir sunucu adına ve bir subgraph'e sınırlayabilirsiniz. Ağ geçidi şu anda Edge & Node tarafından çalıştırılıyor. Bir ağ geçidinin sorumluluğunun bir kısmı, kötü amaçlı davranışları izlemek ve kötü niyetli istemcilerden gelen trafiği engellemektir. +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. ## Diğer From b21b2a1153dcc18be6b0145efd8d27641f67b05e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:40 -0500 Subject: [PATCH 0944/1534] New translations developer-faq.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/developing/developer-faq.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/developing/developer-faq.mdx b/website/src/pages/uk/subgraphs/developing/developer-faq.mdx index ef99407db489..8dbe6d23ad39 100644 --- a/website/src/pages/uk/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/uk/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: FAQ для розробників +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. 
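
Besides the index-node endpoint shown in question 21 above, graph-node also exposes a `_meta` field on the subgraph's own GraphQL endpoint, so the latest indexed block can be fetched with an ordinary query like the following (standard graph-node fields; no subgraph-specific names involved):

```graphql
{
  _meta {
    block {
      number
      hash
    }
    hasIndexingErrors
  }
}
```
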
From 19200d44fb75fa9f5960a137559bd18ea03539e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:41 -0500 Subject: [PATCH 0945/1534] New translations developer-faq.mdx (Chinese Simplified) --- .../pages/zh/subgraphs/developing/developer-faq.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/developer-faq.mdx b/website/src/pages/zh/subgraphs/developing/developer-faq.mdx index 36cb36c8a674..dab117b8f2b5 100644 --- a/website/src/pages/zh/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/zh/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: 开发者常见问题 +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -50,7 +51,7 @@ One possible alternative solution to this is to store raw data in entities and p Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. -请查看“实例化数据源模板”部分:[数据源模板](/developing/creating-a-subgraph/#data-source-templates)。 +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). ### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -如果在事件期间只创建了一个实体并且没有更好的其他方法,那么交易hash + 日志索引的组合是唯一的。 您可以先将其转换为字节,然后将调用 `crypto.keccak256` 来混淆这些内容,但这不会使其更加独特。 +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. What networks are supported by The Graph? -您可以在[这里](/supported-networks/)找到支持的网络列表。 +You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -是的。 您可以按照以下示例通过导入 `graph-ts` 来做到这一点: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' @@ -141,7 +142,7 @@ Federation is not supported yet. At the moment, you can use schema stitching, ei ### 25. I want to contribute or add a GitHub issue. Where can I find the open source repositories? 
-- [图节点](https://github.com/graphprotocol/graph-node) +- [graph-node](https://github.com/graphprotocol/graph-node) - [graph-tooling](https://github.com/graphprotocol/graph-tooling) - [graph-docs](https://github.com/graphprotocol/docs) - [graph-client](https://github.com/graphprotocol/graph-client) From 805c635d0c06b600d62e586931e52c19c113bde8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:42 -0500 Subject: [PATCH 0946/1534] New translations developer-faq.mdx (Urdu (Pakistan)) --- .../pages/ur/subgraphs/developing/developer-faq.mdx | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/developer-faq.mdx b/website/src/pages/ur/subgraphs/developing/developer-faq.mdx index 03912f36a74f..ca250f41cc35 100644 --- a/website/src/pages/ur/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/ur/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: ڈویلپر کے اکثر پوچھے گئے سوالات +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -50,7 +51,7 @@ One possible alternative solution to this is to store raw data in entities and p Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. -[ڈیٹا سورس ٹیمپلیٹس](/developing/creating-a-subgraph/#data-source-templates) پر "ڈیٹا سورس ٹیمپلیٹ کو تیز کرنا" سیکشن دیکھیں. +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). ### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -اگر ایونٹ کے دوران صرف ایک ہستی بنائی جاتی ہے اور اگر اس سے بہتر کوئی چیز دستیاب نہیں ہے، تو ٹرانزیکشن ہیش + لاگ انڈیکس منفرد ہوگا۔ آپ اسے بائٹس میں تبدیل کرکے اور پھر اسے `crypto.keccak256` کے ذریعے پائپ کر کے مبہم کر سکتے ہیں لیکن یہ اسے مزید منفرد نہیں بنائے گا. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. What networks are supported by The Graph? -آپ کو تعاون یافتہ نیٹ ورکس کی فہرست [یہاں](/supported-networks/) مل سکتی ہے. +You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -جی ہاں. آپ ذیل کی مثال کے مطابق `graph-ts` درآمد کر کے ایسا کر سکتے ہیں: +Yes. 
You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' From 5874f04b44f01fb330b1bbd2db8a1b6cd9771b21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:43 -0500 Subject: [PATCH 0947/1534] New translations developer-faq.mdx (Vietnamese) --- .../src/pages/vi/subgraphs/developing/developer-faq.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/developer-faq.mdx b/website/src/pages/vi/subgraphs/developing/developer-faq.mdx index 90bc9950e12e..867c704194ab 100644 --- a/website/src/pages/vi/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/vi/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: Câu hỏi thường gặp dành cho nhà phát triển +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -Nếu chỉ một thực thể được tạo trong sự kiện và nếu không có gì tốt hơn khả dụng, thì chỉ mục log + băm giao dịch sẽ là duy nhất. Bạn có thể làm xáo trộn chúng bằng cách chuyển đổi nó thành Byte và sau đó chuyển nó qua`crypto.keccak256` nhưng điều này sẽ không làm cho nó độc đáo hơn. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -90,7 +91,7 @@ You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -Đúng. Bạn có thể thực hiện việc này bằng cách nhập `graph-ts` theo ví dụ bên dưới: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' From ea6a35bf44b6119ffae84565a8998a9116a828c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:44 -0500 Subject: [PATCH 0948/1534] New translations developer-faq.mdx (Marathi) --- .../mr/subgraphs/developing/developer-faq.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/developer-faq.mdx b/website/src/pages/mr/subgraphs/developing/developer-faq.mdx index 8c4f3ac6578c..8578be282aad 100644 --- a/website/src/pages/mr/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/mr/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: विकसक वारंवार विचारले जाणारे प्रश्न +title: Developer FAQ +sidebarTitle: FAQ --- This page summarizes some of the most common questions for developers building on The Graph. @@ -50,7 +51,7 @@ One possible alternative solution to this is to store raw data in entities and p Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. 
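
The "spawn on interaction" pattern described above can be sketched in a mapping as follows, assuming a factory event `NewExchange` and a template named `Exchange` declared under `templates` in `subgraph.yaml` — all three names are illustrative:

```javascript
import { Exchange } from '../generated/templates' // hypothetical template class generated from subgraph.yaml
import { NewExchange } from '../generated/Factory/Factory' // hypothetical generated event class

export function handleNewExchange(event: NewExchange): void {
  // Start a dynamic data source that indexes the newly created contract,
  // using the template's handlers and ABI
  Exchange.create(event.params.exchange) // `exchange` param assumed to carry the new address
}
```
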
-[डेटा स्रोत टेम्पलेट्स](/developing/creating-a-subgraph/#data-source-templates) यावर "डेटा स्त्रोत टेम्पलेट इन्स्टंटिएटिंग करणे" विभाग पहा. +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). ### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? @@ -76,7 +77,7 @@ When new dynamic data source are created, the handlers defined for dynamic data ### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? -कार्यक्रमादरम्यान फक्त एकच अस्तित्व तयार केले असल्यास आणि त्यापेक्षा चांगले काही उपलब्ध नसल्यास, व्यवहार हॅश + लॉग इंडेक्स अद्वितीय असेल. तुम्ही ते बाइट्समध्ये रूपांतरित करून आणि नंतर `crypto.keccak256` द्वारे पाइपिंग करून त्यांना अस्पष्ट करू शकता परंतु यामुळे ते अधिक अद्वितीय होणार नाही. +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. Can I delete my subgraph? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. What networks are supported by The Graph? -तुम्ही समर्थित नेटवर्कची सूची [येथे](/supported-networks/) शोधू शकता. +You can find the list of the supported networks [here](/supported-networks/). ### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? -होय. तुम्ही खालील उदाहरणानुसार `graph-ts` इंपोर्ट करून हे करू शकता: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' @@ -126,7 +127,7 @@ curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"or By default, query responses are limited to 100 items per collection. If you want to receive more, you can go up to 1000 items per collection and beyond that, you can paginate with: ```graphql -काही संकलन(प्रथम: 1000, वगळा: <संख्या>) { ... } +someCollection(first: 1000, skip: ) { ... } ``` ### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? @@ -141,7 +142,7 @@ Federation is not supported yet. At the moment, you can use schema stitching, ei ### 25. I want to contribute or add a GitHub issue. Where can I find the open source repositories? 
-- [आलेख नोड](https://github.com/graphprotocol/graph-node) +- [graph-node](https://github.com/graphprotocol/graph-node) - [graph-tooling](https://github.com/graphprotocol/graph-tooling) - [graph-docs](https://github.com/graphprotocol/docs) - [graph-client](https://github.com/graphprotocol/graph-client) From ddd7d2f05d10fae1c0cb10936b4330383a2d8ae3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:45 -0500 Subject: [PATCH 0949/1534] New translations developer-faq.mdx (Hindi) --- .../hi/subgraphs/developing/developer-faq.mdx | 33 ++++++++++--------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/developer-faq.mdx b/website/src/pages/hi/subgraphs/developing/developer-faq.mdx index 3f99a88b5c9f..2d608e2cf7b1 100644 --- a/website/src/pages/hi/subgraphs/developing/developer-faq.mdx +++ b/website/src/pages/hi/subgraphs/developing/developer-faq.mdx @@ -1,5 +1,6 @@ --- -title: डेवलपर अक्सर पूछे जाने वाले प्रश्न +title: Developer FAQ +sidebarTitle: FAQ --- यह पृष्ठ 'The Graph' पर निर्माण कर रहे डेवलपर्स के लिए कुछ सामान्य प्रश्नों का सारांश प्रस्तुत करता है। @@ -12,7 +13,7 @@ title: डेवलपर अक्सर पूछे जाने वाले ### 2. एक Subgraph बनाने का पहला कदम क्या है? -एक सबग्रह को सफलतापूर्वक बनाने के लिए, आपको The Graph CLI स्थापित करने की आवश्यकता होगी। आरंभ करने के लिए [Quick Start](/subgraphs/quick-start/) की समीक्षा करें। विस्तृत जानकारी के लिए, देखें [Creating a Subgraph](/developing/creating-a-subgraph/)। +To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/subgraphs/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). ### 3. क्या मैं अभी भी एक subgraph बना सकता हूँ यदि मेरी स्मार्ट कॉन्ट्रैक्ट्स में कोई इवेंट्स नहीं हैं? @@ -36,7 +37,7 @@ title: डेवलपर अक्सर पूछे जाने वाले Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/subgraphs/developing/creating/graph-ts/api/#access-to-smart-contract-state). -### 8. क्या मैं अपने subgraph mappings में `ethers.js` या अन्य JS पुस्तकालय आयात कर सकता हूँ? +### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? AssemblyScript में वर्तमान में मैपिंग्स नहीं लिखी जा रही हैं। @@ -50,13 +51,13 @@ AssemblyScript में वर्तमान में मैपिंग् Templates आपको डेटा स्रोतों को तेजी से बनाने की अनुमति देते हैं, जबकि आपका subgraph इंडेक्सिंग कर रहा है। आपका कॉन्ट्रैक्ट नए कॉन्ट्रैक्ट उत्पन्न कर सकता है जब लोग इसके साथ इंटरैक्ट करते हैं। चूंकि आप उन कॉन्ट्रैक्टों का आकार (ABI, इवेंट, आदि) पहले से जानते हैं, आप यह निर्धारित कर सकते हैं कि आप उन्हें एक टेम्पलेट में कैसे इंडेक्स करना चाहते हैं। जब वे उत्पन्न होते हैं, तो आपका subgraph कॉन्ट्रैक्ट पते को प्रदान करके एक डायनामिक डेटा स्रोत बनाएगा। -"डेटा स्रोत टेम्प्लेट को तत्काल बनाना" अनुभाग देखें: [डेटा स्रोत टेम्प्लेट](/developing/creating-a-subgraph/#data-source-templates)। +Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates). -### 11. क्या `graph init` का उपयोग करके `graph-cli` से एक subgraph सेट करना संभव है जिसमें दो कॉन्ट्रैक्ट हैं? या मुझे `graph init` चलाने के बाद `subgraph.yaml` में एक और dataSource मैन्युअल रूप से जोड़ना चाहिए? +### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? 
-हाँ। `graph init` कमांड पर आप एक के बाद एक कॉन्ट्रैक्ट दर्ज करके कई dataSources जोड़ सकते हैं। +Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. -आप इसका भी उपयोग कर सकते हैं `graph add` कमांड एक नया dataSource जोड़ने के लिए। +You can also use `graph add` command to add a new dataSource. ### 12. एक डेटा स्रोत के लिए इवेंट, ब्लॉक और कॉल हैंडलर्स को किस क्रम में ट्रिगर किया जाता है? @@ -76,7 +77,7 @@ docker pull graphprotocol/graph-node:latest ### 14. इवेंट्स को हैंडल करते समय एक एंटिटी के लिए "स्वतः उत्पन्न" आईडी बनाने का अनुशंसित तरीका क्या है? -यदि घटना के दौरान केवल एक इकाई बनाई जाती है और यदि कुछ भी बेहतर उपलब्ध नहीं है, तो लेन-देन हैश + लॉग इंडेक्स अद्वितीय होगा। आप इन्हें बाइट्स में परिवर्तित करके और फिर इसे `crypto.keccak256` के माध्यम से पाइप करके अस्पष्ट कर सकते हैं, लेकिन यह इसे और अधिक विशिष्ट नहीं बनाएगा। +If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. ### 15. क्या मैं अपना subgraph हटा सकता हूँ? @@ -86,11 +87,11 @@ Yes, you can [delete](/subgraphs/developing/managing/deleting-a-subgraph/) and [ ### 16. The Graph द्वारा समर्थित नेटवर्क कौन से हैं? -आप समर्थित नेटवर्क की सूची [यहां](/supported-networks/) प्राप्त कर सकते हैं। +You can find the list of the supported networks [here](/supported-networks/). ### 17. क्या इवेंट हैंडलर्स के भीतर नेटवर्क (mainnet, Sepolia, local) के बीच अंतर करना संभव है? -हाँ। नीचे दिए गए उदाहरण के अनुसार आप `ग्राफ़-टीएस` आयात करके ऐसा कर सकते हैं: +Yes. You can do this by importing `graph-ts` as per the example below: ```javascript import { dataSource } from '@graphprotocol/graph-ts' @@ -103,15 +104,15 @@ dataSource.address() Yes. Sepolia supports block handlers, call handlers and event handlers. It should be noted that event handlers are far more performant than the other two handlers, and they are supported on every EVM-compatible network. -## Indexing & Querying से संबंधित +## Indexing & Querying Related ### 19. क्या यह संभव है कि किस ब्लॉक से इंडेक्सिंग शुरू की जाए? -`dataSources.source.startBlock` `subgraph.yaml` फ़ाइल में उस ब्लॉक का नंबर निर्दिष्ट करता है जिससे dataSource डेटा इंडेक्स करना शुरू करता है। अधिकांश मामलों में, हम अनुशंसा करते हैं कि उस ब्लॉक का उपयोग करें जहाँ अनुबंध बनाया गया था: [Start blocks](/developing/creating-a-subgraph/#start-blocks) +Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph/#start-blocks) ### 20. यहां कुछ सुझाव दिए गए हैं ताकि इंडेक्सिंग का प्रदर्शन बढ़ सके। मेरा subgraph बहुत लंबे समय तक सिंक होने में समय ले रहा है। -आपको वैकल्पिक 'स्टार्ट ब्लॉक' विशेषता पर एक नज़र डालनी चाहिए ताकि आप उस ब्लॉक से अनुक्रमण शुरू कर सकें जहां अनुबंध को तैनात किया गया था: [Start blocks](/developing/creating-a-subgraph/#start-blocks) +Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph/#start-blocks) ### 21. क्या कोई तरीका है कि 'subgraph' को सीधे क्वेरी करके यह पता लगाया जा सके कि उसने कौन सा लेटेस्ट ब्लॉक नंबर इंडेक्स किया है? @@ -131,11 +132,11 @@ someCollection(first: 1000, skip: ) { ... } ### 23. 
If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? -वर्तमान में, एक dapp के लिए अनुशंसित तरीका यह है कि कुंजी को फ्रंटेंड में जोड़ा जाए और इसे एंड यूज़र्स के लिए एक्सपोज़ किया जाए। हालांकि, आप उस कुंजी को एक होस्टनेम जैसे _yourdapp.io_ और सबग्राफ़ तक सीमित कर सकते हैं। गेटवे को फिलहाल Edge & Node द्वारा चलाया जा रहा है। गेटवे की जिम्मेदारी का एक हिस्सा यह भी है कि वह दुरुपयोग करने वाले व्यवहार की निगरानी करे और दुर्भावनापूर्ण क्लाइंट्स से ट्रैफ़िक को ब्लॉक करे। +Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## विविध +## विविध -### क्या Apollo Federation का उपयोग graph-node के ऊपर किया जा सकता है? +### क्या Apollo Federation का उपयोग graph-node के ऊपर किया जा सकता है? Federation अभी समर्थित नहीं है। फिलहाल, आप schema stitching का उपयोग कर सकते हैं, या तो क्लाइंट पर या एक प्रॉक्सी सेवा के माध्यम से। From bab0eb32db179a41606c1892dfbf24c51029790e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:46 -0500 Subject: [PATCH 0950/1534] New translations introduction.mdx (Romanian) --- website/src/pages/ro/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ro/subgraphs/developing/introduction.mdx b/website/src/pages/ro/subgraphs/developing/introduction.mdx index 45cefaff16ef..615b6cec4c9c 100644 --- a/website/src/pages/ro/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ro/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Developing +title: Introduction to Subgraph Development +sidebarTitle: Introduction --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From 26704b50b32577c859b8e59296ae02fd7e469aa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:47 -0500 Subject: [PATCH 0951/1534] New translations introduction.mdx (French) --- .../fr/subgraphs/developing/introduction.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/introduction.mdx b/website/src/pages/fr/subgraphs/developing/introduction.mdx index 330e2be76d8b..7956855d9d83 100644 --- a/website/src/pages/fr/subgraphs/developing/introduction.mdx +++ b/website/src/pages/fr/subgraphs/developing/introduction.mdx @@ -1,8 +1,9 @@ --- -title: Le Développement +title: Introduction au développement de subgraphs +sidebarTitle: Présentation --- -Pour commencer à coder immédiatement, rendez-vous sur [Démarrage rapide pour développeurs](/subgraphs/quick-start/). 
+Pour commencer à coder immédiatement, rendez-vous sur [Démarrage rapide pour les développeurs](/subgraphs/quick-start/). ## Aperçu @@ -10,16 +11,16 @@ En tant que développeur, vous avez besoin de données pour construire et alimen Sur The Graph, vous pouvez : -1. Créer, déployer et publier des subgraphs sur The Graph en utilisant Graph CLI et [Subgraph Studio](https://thegraph.com/studio/). -2. Utiliser GraphQL pour interroger des subgraphs existants. +1. Créer, déployer et publier des subgraphs sur The Graph à l'aide de Graph CLI et de [Subgraph Studio](https://thegraph.com/studio/). +2. Utiliser GraphQL pour interroger des subgraphs existants. ### Qu'est-ce que GraphQL ? -- [GraphQL](https://graphql.org/learn/) est le langage de requête pour les API et un un moteur d'exécution pour exécuter ces requêtes avec vos données existantes. The Graph utilise GraphQL pour interroger les subgraphs. +- [GraphQL](https://graphql.org/learn/) est un langage de requête pour les API et un moteur d'exécution permettant d'exécuter ces requêtes avec vos données existantes. The Graph utilise GraphQL pour interroger les subgraphs. ### Actions des Développeurs -- Interroger des subgraphs créés par d'autres développeurs dans [The Graph Network](https://thegraph.com/explorer) et les intégrer dans vos propres dapps. +- Interrogez les subgraphs construits par d'autres développeurs dans [The Graph Network](https://thegraph.com/explorer) et intégrez-les dans vos propres dapps. - Créer des subgraphs personnalisés pour répondre à des besoins de données spécifiques, permettant une meilleure évolutivité et flexibilité pour les autres développeurs. - Déployer, publier et signaler vos subgraphs au sein de The Graph Network. @@ -27,4 +28,4 @@ Sur The Graph, vous pouvez : Un subgraph est une API personnalisée construite sur des données blockchain. Il extrait des données d'une blockchain, les traite et les stocke afin qu'elles puissent être facilement interrogées via GraphQL. -Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. +Consultez la documentation sur les [subgraphs](/subgraphs/developing/subgraphs/) pour en savoir plus. From e0555c40a581f530681cdf27f505ffffaca7fbe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:48 -0500 Subject: [PATCH 0952/1534] New translations introduction.mdx (Spanish) --- website/src/pages/es/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/es/subgraphs/developing/introduction.mdx b/website/src/pages/es/subgraphs/developing/introduction.mdx index 4ee907fc2d63..7d4760cb4c35 100644 --- a/website/src/pages/es/subgraphs/developing/introduction.mdx +++ b/website/src/pages/es/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Desarrollando +title: Introduction to Subgraph Development +sidebarTitle: Introducción --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? 
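
To make the GraphQL definition in these introduction files concrete: querying a subgraph is ordinary GraphQL. A sketch of such a query, where the `tokens` entity and its fields are hypothetical and depend entirely on the subgraph's schema:

```graphql
{
  tokens(first: 5, orderBy: id) {
    id
    symbol
    totalSupply
  }
}
```
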
From ef839ef8ce9f30a5f23c9b4d0d18ceffa4b5bf6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:49 -0500 Subject: [PATCH 0953/1534] New translations introduction.mdx (Arabic) --- website/src/pages/ar/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ar/subgraphs/developing/introduction.mdx b/website/src/pages/ar/subgraphs/developing/introduction.mdx index b4d79001819a..d3b71aaab704 100644 --- a/website/src/pages/ar/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ar/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Developing +title: Introduction to Subgraph Development +sidebarTitle: مقدمة --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From dae0989f8c31024ab29d37b825ce98982a2383eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:50 -0500 Subject: [PATCH 0954/1534] New translations introduction.mdx (Czech) --- website/src/pages/cs/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/cs/subgraphs/developing/introduction.mdx b/website/src/pages/cs/subgraphs/developing/introduction.mdx index dd7c69ed878a..110d7639aded 100644 --- a/website/src/pages/cs/subgraphs/developing/introduction.mdx +++ b/website/src/pages/cs/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Vývoj +title: Introduction to Subgraph Development +sidebarTitle: Úvod --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From 25d97f60ba87264845b29d4689f43670f9e930e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:51 -0500 Subject: [PATCH 0955/1534] New translations introduction.mdx (German) --- .../src/pages/de/subgraphs/developing/introduction.mdx | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/introduction.mdx b/website/src/pages/de/subgraphs/developing/introduction.mdx index 45cefaff16ef..fd2872880ce0 100644 --- a/website/src/pages/de/subgraphs/developing/introduction.mdx +++ b/website/src/pages/de/subgraphs/developing/introduction.mdx @@ -1,17 +1,18 @@ --- -title: Developing +title: Introduction to Subgraph Development +sidebarTitle: Einführung --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). 
-## Overview +## Überblick As a developer, you need data to build and power your dapp. Querying and indexing blockchain data is challenging, but The Graph provides a solution to this issue. On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From 181a7b7d3ed55b977d85d439b8af42f6fd725e50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:52 -0500 Subject: [PATCH 0956/1534] New translations introduction.mdx (Italian) --- website/src/pages/it/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/it/subgraphs/developing/introduction.mdx b/website/src/pages/it/subgraphs/developing/introduction.mdx index f99cd534a1f6..53060bdd4de4 100644 --- a/website/src/pages/it/subgraphs/developing/introduction.mdx +++ b/website/src/pages/it/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Sviluppo +title: Introduction to Subgraph Development +sidebarTitle: Introduzione --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From 1fc9565b6e60a1a8b216fe3e84d8a039297d9779 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:53 -0500 Subject: [PATCH 0957/1534] New translations introduction.mdx (Japanese) --- website/src/pages/ja/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ja/subgraphs/developing/introduction.mdx b/website/src/pages/ja/subgraphs/developing/introduction.mdx index e31b269d03e6..982e426ba4aa 100644 --- a/website/src/pages/ja/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ja/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: 現像 +title: Introduction to Subgraph Development +sidebarTitle: イントロダクション --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? 
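
The "create, deploy, and publish" workflow these introduction pages reference corresponds roughly to the following Graph CLI commands; exact flags vary by CLI version, and the key/slug placeholders are assumptions rather than values from these files:

```sh
# Scaffold a new subgraph project interactively
graph init

# Generate AssemblyScript types from the schema and ABIs, then compile
graph codegen && graph build

# Authenticate with a Subgraph Studio deploy key, then deploy
graph auth <DEPLOY_KEY>
graph deploy <SUBGRAPH_SLUG>
```
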
From 2179f6adbc8df524a34f486e6dac73c7982f720f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:54 -0500 Subject: [PATCH 0958/1534] New translations introduction.mdx (Korean) --- website/src/pages/ko/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ko/subgraphs/developing/introduction.mdx b/website/src/pages/ko/subgraphs/developing/introduction.mdx index 45cefaff16ef..615b6cec4c9c 100644 --- a/website/src/pages/ko/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ko/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Developing +title: Introduction to Subgraph Development +sidebarTitle: Introduction --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From 2c6c517cf4917b008aeb2be84768d2994f3bd3bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:55 -0500 Subject: [PATCH 0959/1534] New translations introduction.mdx (Dutch) --- website/src/pages/nl/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/nl/subgraphs/developing/introduction.mdx b/website/src/pages/nl/subgraphs/developing/introduction.mdx index 6b84b83bf60a..615b6cec4c9c 100644 --- a/website/src/pages/nl/subgraphs/developing/introduction.mdx +++ b/website/src/pages/nl/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Ontwikkelen +title: Introduction to Subgraph Development +sidebarTitle: Introduction --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From 8b3e8c3c0f969dd7b7fc7848d02d3bd36841a4e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:56 -0500 Subject: [PATCH 0960/1534] New translations introduction.mdx (Polish) --- website/src/pages/pl/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/pl/subgraphs/developing/introduction.mdx b/website/src/pages/pl/subgraphs/developing/introduction.mdx index 45cefaff16ef..509b25654e82 100644 --- a/website/src/pages/pl/subgraphs/developing/introduction.mdx +++ b/website/src/pages/pl/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Developing +title: Introduction to Subgraph Development +sidebarTitle: Wstęp --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). 
@@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From c88a964f4197c9af690055326b74bd1c09ff5c2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:57 -0500 Subject: [PATCH 0961/1534] New translations introduction.mdx (Portuguese) --- .../src/pages/pt/subgraphs/developing/introduction.mdx | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/src/pages/pt/subgraphs/developing/introduction.mdx b/website/src/pages/pt/subgraphs/developing/introduction.mdx index ddf26f28f05d..e550867e2244 100644 --- a/website/src/pages/pt/subgraphs/developing/introduction.mdx +++ b/website/src/pages/pt/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Programação +title: Introduction to Subgraph Development +sidebarTitle: Introdução --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,10 +11,10 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. -### What is GraphQL? +### O Que é a GraphQL? - [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. From 9348cd991c307d81957ba42174e0f73574e72db8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:58 -0500 Subject: [PATCH 0962/1534] New translations introduction.mdx (Russian) --- .../ru/subgraphs/developing/introduction.mdx | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/website/src/pages/ru/subgraphs/developing/introduction.mdx b/website/src/pages/ru/subgraphs/developing/introduction.mdx index ac69c283c525..d5b1df06feae 100644 --- a/website/src/pages/ru/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ru/subgraphs/developing/introduction.mdx @@ -1,30 +1,31 @@ --- -title: Developing +title: Introduction to Subgraph Development +sidebarTitle: Introduction --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). ## Обзор -As a developer, you need data to build and power your dapp. Querying and indexing blockchain data is challenging, but The Graph provides a solution to this issue. +Как разработчику, Вам нужны данные для создания и поддержки Вашего децентрализованного приложения. Запрос и индексация данных блокчейна — сложная задача, но The Graph предлагает решение этой проблемы. -On The Graph, you can: +На The Graph Вы можете: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. 
Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Использовать GraphQL для запроса существующих субграфов. -### What is GraphQL? +### Что такое GraphQL? - [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. -### Developer Actions +### Действия разработчика - Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. +- Создавайте собственные субграфы для удовлетворения конкретных потребностей в данных, обеспечивая улучшенную масштабируемость и гибкость для других разработчиков. +- Развертывайте, публикуйте и сигнализируйте о своих субграфах в The Graph Network. -### What are subgraphs? +### Что такое субграфы? -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. +Субграф — это пользовательский API, созданный на основе данных блокчейна. Он извлекает данные из блокчейна, обрабатывает их и сохраняет так, чтобы их можно было легко запросить через GraphQL. Check out the documentation on [subgraphs](/subgraphs/developing/subgraphs/) to learn specifics. From 41a60165634d57c77d4f4780147b95da8b69d307 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:01:59 -0500 Subject: [PATCH 0963/1534] New translations introduction.mdx (Swedish) --- website/src/pages/sv/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/sv/subgraphs/developing/introduction.mdx b/website/src/pages/sv/subgraphs/developing/introduction.mdx index f98e1d38c374..bf5f1bb0f311 100644 --- a/website/src/pages/sv/subgraphs/developing/introduction.mdx +++ b/website/src/pages/sv/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Utveckling +title: Introduction to Subgraph Development +sidebarTitle: Introduktion --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? 
From 9cbbec9d8a18ca36fc028df41462e69f5abf43e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:00 -0500 Subject: [PATCH 0964/1534] New translations introduction.mdx (Turkish) --- website/src/pages/tr/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/introduction.mdx b/website/src/pages/tr/subgraphs/developing/introduction.mdx index cb3033747b5a..6a76c8957cee 100644 --- a/website/src/pages/tr/subgraphs/developing/introduction.mdx +++ b/website/src/pages/tr/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Geliştirme +title: Introduction to Subgraph Development +sidebarTitle: Giriş --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From d6d8ab9429b83a7eb627e7cd31c77fade50ea096 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:01 -0500 Subject: [PATCH 0965/1534] New translations introduction.mdx (Ukrainian) --- website/src/pages/uk/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/uk/subgraphs/developing/introduction.mdx b/website/src/pages/uk/subgraphs/developing/introduction.mdx index da3e509b969d..615b6cec4c9c 100644 --- a/website/src/pages/uk/subgraphs/developing/introduction.mdx +++ b/website/src/pages/uk/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Розробка +title: Introduction to Subgraph Development +sidebarTitle: Introduction --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From ef9c86272e63053f1de8f2e2f30008ce6c245e30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:02 -0500 Subject: [PATCH 0966/1534] New translations introduction.mdx (Chinese Simplified) --- website/src/pages/zh/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/zh/subgraphs/developing/introduction.mdx b/website/src/pages/zh/subgraphs/developing/introduction.mdx index b7ea25dc49cd..a34bc90855b3 100644 --- a/website/src/pages/zh/subgraphs/developing/introduction.mdx +++ b/website/src/pages/zh/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: 开发 +title: Introduction to Subgraph Development +sidebarTitle: 介绍 --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). 
@@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From add6fa409c64b1dd7a6cb1e4cb9595f1c0cdbe45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:03 -0500 Subject: [PATCH 0967/1534] New translations introduction.mdx (Urdu (Pakistan)) --- website/src/pages/ur/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/ur/subgraphs/developing/introduction.mdx b/website/src/pages/ur/subgraphs/developing/introduction.mdx index 2ab9c83db44f..e7ab36598ccb 100644 --- a/website/src/pages/ur/subgraphs/developing/introduction.mdx +++ b/website/src/pages/ur/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: ڈویلپنگ +title: Introduction to Subgraph Development +sidebarTitle: تعارف --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From 89f5303d0a3c30376761d232171a96fd3c77406e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:04 -0500 Subject: [PATCH 0968/1534] New translations introduction.mdx (Vietnamese) --- website/src/pages/vi/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/vi/subgraphs/developing/introduction.mdx b/website/src/pages/vi/subgraphs/developing/introduction.mdx index c92c181f3af0..ea7cc276b1d2 100644 --- a/website/src/pages/vi/subgraphs/developing/introduction.mdx +++ b/website/src/pages/vi/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: Developing +title: Introduction to Subgraph Development +sidebarTitle: Giới thiệu --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? 
From dfe7cff2ba05c2a40ee3f8ec42dd927279595f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:05 -0500 Subject: [PATCH 0969/1534] New translations introduction.mdx (Marathi) --- website/src/pages/mr/subgraphs/developing/introduction.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/src/pages/mr/subgraphs/developing/introduction.mdx b/website/src/pages/mr/subgraphs/developing/introduction.mdx index 9f9f1251fc88..3123dd66f2a7 100644 --- a/website/src/pages/mr/subgraphs/developing/introduction.mdx +++ b/website/src/pages/mr/subgraphs/developing/introduction.mdx @@ -1,5 +1,6 @@ --- -title: विकसनशील +title: Introduction to Subgraph Development +sidebarTitle: Introduction --- To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). @@ -10,8 +11,8 @@ As a developer, you need data to build and power your dapp. Querying and indexin On The Graph, you can: -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. Use GraphQL to query existing subgraphs. ### What is GraphQL? From 657e9b97b19a59c90a74b9fb9b9b1ca8b763d34a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:06 -0500 Subject: [PATCH 0970/1534] New translations introduction.mdx (Hindi) --- .../pages/hi/subgraphs/developing/introduction.mdx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/src/pages/hi/subgraphs/developing/introduction.mdx b/website/src/pages/hi/subgraphs/developing/introduction.mdx index cf15f2f11e38..12e2aba18447 100644 --- a/website/src/pages/hi/subgraphs/developing/introduction.mdx +++ b/website/src/pages/hi/subgraphs/developing/introduction.mdx @@ -1,8 +1,9 @@ --- -title: विकसित होना +title: Introduction to Subgraph Development +sidebarTitle: Introduction --- -कोडिंग तुरंत शुरू करने के लिए, [डेवलपर क्विक स्टार्ट पर जाएं](/subgraphs/quick-start/)। +To start coding right away, go to [Developer Quick Start](/subgraphs/quick-start/). ## अवलोकन @@ -10,16 +11,16 @@ title: विकसित होना The Graph पर, आप: -1. "बनाएँ, डिप्लॉय करें, और The Graph में subgraphs प्रकाशित करें Graph CLI और" [Subgraph Studio](https://thegraph.com/studio/). -2. मौजूदा subgraphs को क्वेरी करने के लिए GraphQL का उपयोग करें। +1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). +2. मौजूदा subgraphs को क्वेरी करने के लिए GraphQL का उपयोग करें। ### GraphQL क्या है? -- [GraphQL](https://graphql.org/learn/) APIs के लिए एक क्वेरी भाषा और आपके मौजूदा डेटा के साथ उन क्वेरीज़ को पूरा करने के लिए एक रनटाइम। The Graph सबग्राफ को क्वेरी करने के लिए GraphQL का उपयोग करता है। +- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. ### डेवलपर क्रियाएँ -- अन्य डेवलपर्स द्वारा बनाए गए subgraphs को [The Graph Network](https://thegraph.com/explorer) में query करें और उन्हें अपनी dapps में एकीकृत करें। +- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. 
- विशिष्ट डेटा आवश्यकताओं को पूरा करने के लिए कस्टम सबग्राफ़ बनाएं, जिससे अन्य डेवलपर्स के लिए स्केलेबिलिटी और लचीलापन में सुधार हो सके। - अपने subgraphs को The Graph Network में तैनात करें, प्रकाशित करें और संकेत दें। From b222127bf82be8ead386dc978cba573f7a69d080 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:07 -0500 Subject: [PATCH 0971/1534] New translations deleting-a-subgraph.mdx (Romanian) --- .../ro/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ro/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1807741026ae..5a4ac15e07fd 100644 --- a/website/src/pages/ro/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ro/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 73a7278b9b8334d64dafdf29bbac6bacedbdf5a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:08 -0500 Subject: [PATCH 0972/1534] New translations deleting-a-subgraph.mdx (French) --- .../managing/deleting-a-subgraph.mdx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/fr/subgraphs/developing/managing/deleting-a-subgraph.mdx index 5e69052e4f4b..c74be2b234dd 100644 --- a/website/src/pages/fr/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/fr/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,31 +1,31 @@ --- -title: Delete a Subgraph +title: Suppression d'un Subgraph --- -Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). +Supprimez votre subgraph en utilisant [Subgraph Studio](https://thegraph.com/studio/). -> Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. +> En supprimant votre subgraph, vous supprimez toutes les versions publiées de The Graph Network, mais il restera visible sur Graph Explorer et Subgraph Studio pour les utilisateurs qui l'ont signalé. ## Étape par Étape -1. Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). +1. Visitez la page du subgraph sur [Subgraph Studio](https://thegraph.com/studio/). -2. Click on the three-dots to the right of the "publish" button. +2. Cliquez sur les trois points à droite du bouton "publier". -3. Click on the option to "delete this subgraph": +3. Cliquez sur l'option "delete this subgraph": ![Delete-subgraph](/img/Delete-subgraph.png) -4. Depending on the subgraph's status, you will be prompted with various options. +4. En fonction de l'état du subgraph, différentes options vous seront proposées. - - If the subgraph is not published, simply click “delete” and confirm. - - If the subgraph is published, you will need to confirm on your wallet before the subgraph can be deleted from Studio. If a subgraph is published to multiple networks, such as testnet and mainnet, additional steps may be required. + - Si le subgraph n'est pas publié, il suffit de cliquer sur “delete“ et de confirmer. 
+ - Si le subgraph est publié, vous devrez le confirmer sur votre portefeuille avant de pouvoir le supprimer de Studio. Si un subgraph est publié sur plusieurs réseaux, tels que testnet et mainnet, des étapes supplémentaires peuvent être nécessaires. -> If the owner of the subgraph has signal on it, the signaled GRT will be returned to the owner. +> Si le propriétaire du subgraph l'a signalé, les GRT signalés seront renvoyés au propriétaire. -### Important Reminders +### Rappels importants -- Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. +- Une fois que vous avez supprimé un subgraph, il **n'apparaîtra plus** sur la page d'accueil de Graph Explorer. Toutefois, les utilisateurs qui ont signalé sur ce subgraph pourront toujours le voir sur leurs pages de profil et supprimer leur signal. - Les curateurs ne seront plus en mesure de signaler le subgraph. - Les Curateurs qui ont déjà signalé sur le subgraph peuvent retirer leur signal à un prix moyen par action. -- Deleted subgraphs will show an error message. +- Les subgraphs supprimés afficheront un message d'erreur. From 934543be467039eb6c5b525b2d5aaeb761a1b71e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:09 -0500 Subject: [PATCH 0973/1534] New translations deleting-a-subgraph.mdx (Spanish) --- .../es/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/es/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/es/subgraphs/developing/managing/deleting-a-subgraph.mdx index 0bd18777b42f..972a4f552c25 100644 --- a/website/src/pages/es/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/es/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From e77c9ce5690afdf78582a136cb5e0b11ad01940b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:10 -0500 Subject: [PATCH 0974/1534] New translations deleting-a-subgraph.mdx (Arabic) --- .../ar/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ar/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ar/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1807741026ae..5a4ac15e07fd 100644 --- a/website/src/pages/ar/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ar/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). 
From 092563ebb2772cba8b9ee87fd2f99456efb8dd72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:11 -0500 Subject: [PATCH 0975/1534] New translations deleting-a-subgraph.mdx (Czech) --- .../cs/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/cs/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/cs/subgraphs/developing/managing/deleting-a-subgraph.mdx index fb6241fd8526..77896e36a45d 100644 --- a/website/src/pages/cs/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/cs/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 84577b6e89d93459224c3eb32d7da065e7bc97cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:12 -0500 Subject: [PATCH 0976/1534] New translations deleting-a-subgraph.mdx (German) --- .../de/subgraphs/developing/managing/deleting-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/de/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/de/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1807741026ae..91c22f7c44ba 100644 --- a/website/src/pages/de/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/de/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,12 +1,12 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). > Deleting your subgraph will remove all published versions from The Graph Network, but it will remain visible on Graph Explorer and Subgraph Studio for users who have signaled on it. -## Step-by-Step +## Schritt für Schritt 1. Visit the subgraph's page on [Subgraph Studio](https://thegraph.com/studio/). From ddbd79cbd898208fad42b3f2331cad4626b9ab58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:13 -0500 Subject: [PATCH 0977/1534] New translations deleting-a-subgraph.mdx (Italian) --- .../it/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/it/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/it/subgraphs/developing/managing/deleting-a-subgraph.mdx index fa27461c260a..90a2eb4b7d33 100644 --- a/website/src/pages/it/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/it/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). 
From b03658f47316f6a7daa0b30e0565f9f78d197361 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:14 -0500 Subject: [PATCH 0978/1534] New translations deleting-a-subgraph.mdx (Japanese) --- .../ja/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ja/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ja/subgraphs/developing/managing/deleting-a-subgraph.mdx index 39822d28a77e..6a9aef388d02 100644 --- a/website/src/pages/ja/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ja/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 741eadfe95d949190f350b41cfad7db19186d0df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:15 -0500 Subject: [PATCH 0979/1534] New translations deleting-a-subgraph.mdx (Korean) --- .../ko/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ko/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ko/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1807741026ae..5a4ac15e07fd 100644 --- a/website/src/pages/ko/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ko/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 9555ef9368276f4a3cbd91c3078c4b501b20a32c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:16 -0500 Subject: [PATCH 0980/1534] New translations deleting-a-subgraph.mdx (Dutch) --- .../nl/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/nl/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/nl/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1807741026ae..5a4ac15e07fd 100644 --- a/website/src/pages/nl/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/nl/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From fac1791494c2778f785647c97eb51564b3189efc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:17 -0500 Subject: [PATCH 0981/1534] New translations deleting-a-subgraph.mdx (Polish) --- .../pl/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pl/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/pl/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1807741026ae..5a4ac15e07fd 100644 --- a/website/src/pages/pl/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/pl/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). 
From 37d5881ef3e2987ec08e402ed54533ddc50c8bc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:18 -0500 Subject: [PATCH 0982/1534] New translations deleting-a-subgraph.mdx (Portuguese) --- .../pt/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/pt/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/pt/subgraphs/developing/managing/deleting-a-subgraph.mdx index e25db56bdcf3..d5305fe2cfbe 100644 --- a/website/src/pages/pt/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/pt/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From bef6b662e451a3acd7c3f6907213ef5658596ccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:19 -0500 Subject: [PATCH 0983/1534] New translations deleting-a-subgraph.mdx (Russian) --- .../ru/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ru/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ru/subgraphs/developing/managing/deleting-a-subgraph.mdx index 68d7658f63f9..5787620c079a 100644 --- a/website/src/pages/ru/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ru/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From d2c007e8443c081cf5d1faf17a7ecdb3ce9003b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:20 -0500 Subject: [PATCH 0984/1534] New translations deleting-a-subgraph.mdx (Swedish) --- .../sv/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/sv/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/sv/subgraphs/developing/managing/deleting-a-subgraph.mdx index eb286c4e4fc3..ae778febe161 100644 --- a/website/src/pages/sv/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/sv/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 778f95b3f8c2488e371225ddca714fbf46b83b2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:21 -0500 Subject: [PATCH 0985/1534] New translations deleting-a-subgraph.mdx (Turkish) --- .../tr/subgraphs/developing/managing/deleting-a-subgraph.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/src/pages/tr/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/tr/subgraphs/developing/managing/deleting-a-subgraph.mdx index 02bef8483798..e4564fc247f2 100644 --- a/website/src/pages/tr/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/tr/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). 
@@ -26,6 +26,6 @@ Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). ### Important Reminders - Once you delete a subgraph, it will **not** appear on Graph Explorer's homepage. However, users who have signaled on it will still be able to view it on their profile pages and remove their signal. -- Küratörler artık subgraph'e sinyal veremeyecek. +- Curators will not be able to signal on the subgraph anymore. - Subgraph'e halihazırda sinyal vermiş küratörler, sinyallerini ortalama hisse fiyatından geri çekebilir. - Deleted subgraphs will show an error message. From 0762aa01dd7f16b81f29a2f38f7f4d6a17ea3893 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:22 -0500 Subject: [PATCH 0986/1534] New translations deleting-a-subgraph.mdx (Ukrainian) --- .../uk/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/uk/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/uk/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1807741026ae..5a4ac15e07fd 100644 --- a/website/src/pages/uk/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/uk/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 5b7a4dc821f82e2f929ae8ddcedd73352db178d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:23 -0500 Subject: [PATCH 0987/1534] New translations deleting-a-subgraph.mdx (Chinese Simplified) --- .../zh/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/zh/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/zh/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1b2d8ab4ba7b..dff170e3730f 100644 --- a/website/src/pages/zh/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/zh/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 0fd670a920fcb80068ead82019962931093121a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:24 -0500 Subject: [PATCH 0988/1534] New translations deleting-a-subgraph.mdx (Urdu (Pakistan)) --- .../ur/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ur/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/ur/subgraphs/developing/managing/deleting-a-subgraph.mdx index c58c2c829e1d..f078c166db88 100644 --- a/website/src/pages/ur/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/ur/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). 
From d560672ac6b38851282dcbc983ff4c21fe858b49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:25 -0500 Subject: [PATCH 0989/1534] New translations deleting-a-subgraph.mdx (Vietnamese) --- .../vi/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/vi/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/vi/subgraphs/developing/managing/deleting-a-subgraph.mdx index 1807741026ae..5a4ac15e07fd 100644 --- a/website/src/pages/vi/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/vi/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From c3daaa2db02cd15a18589c0e8f406cde731c22b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:26 -0500 Subject: [PATCH 0990/1534] New translations deleting-a-subgraph.mdx (Marathi) --- .../mr/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/mr/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/mr/subgraphs/developing/managing/deleting-a-subgraph.mdx index e8b5118eb2fc..cabf1261970a 100644 --- a/website/src/pages/mr/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/mr/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 1f7fd6ce5a5a584a6eae5a125e3c310049150bbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:27 -0500 Subject: [PATCH 0991/1534] New translations deleting-a-subgraph.mdx (Hindi) --- .../hi/subgraphs/developing/managing/deleting-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/hi/subgraphs/developing/managing/deleting-a-subgraph.mdx b/website/src/pages/hi/subgraphs/developing/managing/deleting-a-subgraph.mdx index e30dcc591775..e0889b86b0ab 100644 --- a/website/src/pages/hi/subgraphs/developing/managing/deleting-a-subgraph.mdx +++ b/website/src/pages/hi/subgraphs/developing/managing/deleting-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: Delete a Subgraph +title: Deleting a Subgraph --- Delete your subgraph using [Subgraph Studio](https://thegraph.com/studio/). From 3dbe86469f91e780081f8181249edc35ff15dae9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:28 -0500 Subject: [PATCH 0992/1534] New translations publishing-a-subgraph.mdx (Romanian) --- .../subgraphs/developing/publishing/publishing-a-subgraph.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/src/pages/ro/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/ro/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 443a91217bc2..dca943ad3152 100644 --- a/website/src/pages/ro/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/ro/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -72,7 +72,7 @@ Developers can add GRT signal to their subgraphs to incentivize Indexers to quer - Specific supported networks can be checked [here](/supported-networks/). 
> Adding signal to a subgraph which is not eligible for rewards will not attract additional Indexers. - +> > If your subgraph is eligible for rewards, it is recommended that you curate your own subgraph with at least 3,000 GRT in order to attract additional indexers to index your subgraph. The [Sunrise Upgrade Indexer](/archived/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs. However, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. From 688d4d1b0c71c4f5b3b11b64659a7420db6be5fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Rouleau?= Date: Fri, 14 Feb 2025 13:02:29 -0500 Subject: [PATCH 0993/1534] New translations publishing-a-subgraph.mdx (French) --- .../publishing/publishing-a-subgraph.mdx | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/website/src/pages/fr/subgraphs/developing/publishing/publishing-a-subgraph.mdx b/website/src/pages/fr/subgraphs/developing/publishing/publishing-a-subgraph.mdx index 47ac1f0ffa4f..19a14a1b0eb2 100644 --- a/website/src/pages/fr/subgraphs/developing/publishing/publishing-a-subgraph.mdx +++ b/website/src/pages/fr/subgraphs/developing/publishing/publishing-a-subgraph.mdx @@ -2,12 +2,12 @@ title: Publication d'un subgraph sur le réseau décentralisé --- -Une fois que vous avez [déployé votre subgraph sur Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) et qu'il est prêt à passer en production, vous pouvez le publier sur le réseau décentralisé. +Une fois que vous avez [déployé votre sous-graphe dans Subgraph Studio](/deploying/deploying-a-subgraph-to-studio/) et qu'il est prêt à être mis en production, vous pouvez le publier sur le réseau décentralisé. Lorsque vous publiez un subgraph sur le réseau décentralisé, vous le rendez disponible pour : -- [Les Curateurs](/resources/roles/curating/), qui peuvent commencer à le curer. -- [Les Indexeurs](/indexing/overview/), qui peuvent commencer à l’indexer. +- [Curateurs](/resources/roles/curating/) pour commencer la curation. +- [Indexeurs](/indexing/overview/) pour commencer à l'indexer. @@ -15,15 +15,15 @@ Consultez la liste des [réseaux pris en charge](/supported-networks/). ## Publication à partir de Subgraph Studio -1. Allez sur le tableau de bord de [Subgraph Studio](https://thegraph.com/studio/) +1. Accédez au tableau de bord de [Subgraph Studio](https://thegraph.com/studio/) 2. Cliquez sur le bouton **Publish** -3. Votre subgraph sera maintenant visible dans [Graph Explorer](https://thegraph.com/explorer/). +3. Votre subgraph est désormais visible dans [Graph Explorer](https://thegraph.com/explorer/). Toutes les versions publiées d'un subgraph existant peuvent : -- Être publiées sur Arbitrum One. [En savoir plus sur The Graph Network sur Arbitrum](/archived/arbitrum/arbitrum-faq/). +- Être publié sur Arbitrum One. [En savoir plus sur The Graph Network sur Arbitrum](/archived/arbitrum/arbitrum-faq/). -- Indexer des données sur l'un des [réseaux pris en charge](/supported-networks/), quel que soit le réseau sur lequel le subgraph a été publié. +- Indexer les données sur n'importe lequel des [réseaux pris en charge](/supported-networks/), quel que soit le réseau sur lequel le subgraph a été publié. 
### Mise à jour des métadonnées d'un subgraph publié

@@ -33,9 +33,9 @@ Toutes les versions publiées d'un subgraph existant peuvent :

## Publication à partir de la CLI

À partir de la version 0.73.0, vous pouvez également publier votre subgraph avec la [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli).

1. Ouvrez la `graph-cli`.
2. Utilisez les commandes suivantes : `graph codegen && graph build` puis `graph publish`.
3. Une fenêtre s'ouvrira, vous permettant de connecter votre portefeuille, d'ajouter des métadonnées et de déployer votre subgraph finalisé sur le réseau de votre choix.

![cli-ui](/img/cli-ui.png)

### Personnalisation de votre déploiement

Vous pouvez uploader votre build de subgraph sur un nœud IPFS spécifique et personnaliser davantage votre déploiement avec les options suivantes :

```
UTILISATION
  $ graph publish [SUBGRAPH-MANIFEST] [-h] [--protocol-network arbitrum-one|arbitrum-sepolia --subgraph-id <value>] [-i <value>] [--ipfs-hash <value>] [--webapp-url
    <value>]

FLAGS
  -h, --help Afficher l'aide du CLI.
  -i, --ipfs=<value> [default: https://api.thegraph.com/ipfs/api/v0] Téléchargez les résultats du build sur un nœud IPFS.
  --ipfs-hash=<value> Hash IPFS du manifeste du subgraph à déployer.
  --protocol-network=<option>